content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def admin_user_detail():
    """Admin user edit/detail view.

    GET: render the edit page for the user identified by the ``admin_id``
    query parameter.
    POST: update that user's profile fields (nickname, mobile, signature,
    gender; optionally password and avatar).
    Non-admin visitors are redirected to the site root.

    NOTE(review): the POST branch mutates the User object but no
    ``db.session.commit()`` is visible in this block — confirm persistence
    is handled elsewhere (e.g. an after-request/teardown hook).
    """
    # Only administrators may access this view.
    if not g.user.is_admin:
        return redirect('/')
    if request.method == 'GET':
        # ---- GET branch: render the detail page ----
        admin_id = request.args.get('admin_id')
        if not admin_id:
            abort(404)
        try:
            admin_id = int(admin_id)
        except Exception as e:
            current_app.logger.error(e)
            return render_template('admin/admin_text_edit.html', data={"errmsg": "参数错误"})
        # Look up the user by id.
        admin_user_dict = None
        try:
            admin_user_dict = User.query.get(admin_id)
        except Exception as e:
            current_app.logger.error(e)
        if not admin_user_dict:
            return render_template('admin/admin_text_edit.html', data={"errmsg": "未查询到此配置信息"})
        # Render the detail template with the serialized user.
        data = {
            "admin_user_dict": admin_user_dict.to_dict(),
        }
        return render_template('admin/admin_user_detail.html', data=data)
    # ---- POST branch: apply edits ----
    admin_id = request.form.get("admin_id")
    nick_name = request.form.get("nick_name")
    password = request.form.get("password")
    mobile = request.form.get("mobile")
    signature = request.form.get("signature")
    gender = request.form.get("gender")
    avatar_url = request.files.get("avatar_url")  # uploaded avatar file, optional
    # 1.1 Required fields (password/signature/avatar are optional).
    if not all([nick_name, admin_id, mobile, gender]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
    # Fetch the user being edited.
    try:
        user = User.query.get(admin_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    if not user:
        return jsonify(errno=RET.NODATA, errmsg="未查询到新闻数据")
    # 1.2 If an avatar was uploaded, read its bytes.
    if avatar_url:
        try:
            wxcode_image = avatar_url.read()
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
        # 2. Upload the avatar image to Qiniu object storage.
        try:
            key = storage(wxcode_image)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.THIRDERR, errmsg="上传图片错误")
        user.avatar_url = constants.QINIU_DOMIN_PREFIX + key
    if password:
        # Assigning to ``user.password`` presumably hashes via a model
        # property setter — TODO confirm against the User model.
        user.password = password
    # 3. Update the remaining profile fields.
    user.nick_name = nick_name
    user.mobile = mobile
    user.signature = signature
    user.gender = gender
    return jsonify(errno=RET.OK, errmsg='OK')
def splitBinNum(binNum):
    """Split an alternate block number into latitude and longitude parts.

    Args:
        binNum (int): Alternative block number

    Returns:
        :tuple Tuple:
            1. (int) Latitude portion of the alternate block number.
               Example: ``614123`` => ``614``
            2. (int) Longitude portion of the alternate block number.
               Example: ``614123`` => ``123``
    """
    # divmod keeps everything in integer arithmetic; the previous
    # ``int(binNum / 1000)`` went through float division, which loses
    # precision once binNum exceeds 2**53.
    latBin, longBin = divmod(binNum, 1000)
    return (latBin, longBin)
def load_model_from_params_file(model):
    """
    Initialize model weights from a checkpoint or pre-trained params file.

    case 0: CHECKPOINT.CONVERT_MODEL = True:
        Convert the model
    case 1: CHECKPOINT.RESUME = False and TRAIN.PARAMS_FILE is not none:
        load params_file
    case 2: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is not none:
        case 2a: if checkpoint exist: use checkpoint
        case 2b: if checkpoint not exist: use params_file
    case 3: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is none:
        case 3a: if checkpoint exist: use checkpoint
        case 3b: if checkpoint not exist: set start_model_iter = 0

    Returns the iteration to resume training from; also sets
    ``model.current_lr`` whenever weights were loaded.
    """
    use_checkpoint = cfg.CHECKPOINT.RESUME and find_checkpoint()
    logger.info("Resume training: {}". format(cfg.CHECKPOINT.RESUME))
    if cfg.TRAIN.PARAMS_FILE and cfg.CHECKPOINT.CONVERT_MODEL:
        # After convert model, should use affine layer
        assert(cfg.MODEL.USE_AFFINE)
        converted_checkpoint = convert_model(cfg.TRAIN.PARAMS_FILE)
        logger.info('Checkpoint model converted')
        # Point PARAMS_FILE at the converted weights so the cases below use it.
        cfg.TRAIN.PARAMS_FILE = converted_checkpoint
    if cfg.TRAIN.PARAMS_FILE and not use_checkpoint:
        # Cases 1 and 2b: initialize from the pre-trained params file.
        logger.info('Initializing from pre-trained file...')
        start_model_iter, prev_lr = initialize_params_from_file(
            model=model, weights_file=cfg.TRAIN.PARAMS_FILE,
            load_momentum=False,  # We don't load momentum if it is pretrained.
        )
        logger.info(('Loaded: start_model_iter: {}; prev_lr: {:.8f}').format(
            start_model_iter, prev_lr))
        model.current_lr = prev_lr
        # Correct start_model_iter if pretraining uses a different batch size
        # (mainly used for 1-node warmup).
        if cfg.TRAIN.RESUME_FROM_BATCH_SIZE > 0:
            start_model_iter = misc.resume_from(start_model_iter)
        # If we only want the weights.
        if cfg.TRAIN.RESET_START_ITER:
            start_model_iter = 0
    elif use_checkpoint:
        # Cases 2a and 3a: resume from the latest checkpoint on disk.
        logger.info('Initializing from checkpoints...')
        start_model_iter, prev_lr = initialize_params_from_file(
            model=model, weights_file=get_checkpoint_resume_file())
        logger.info(('Loaded: start_model_iter: {}; prev_lr: {:.8f}').format(
            start_model_iter, prev_lr))
        model.current_lr = prev_lr
    else:
        # Case 3b: nothing to load; train from scratch.
        start_model_iter = 0
        logger.info('No checkpoint found; training from scratch...')
    return start_model_iter
def getPool(pool_type='avg', gmp_lambda=1e3, lse_r=10):
    """Create a global pooling layer by name.

    # NOTE: this function is not used in writer_ident, s. constructor of
    # ResNet50Encoder

    Args:
        pool_type: one of 'gmp', 'avg', 'max', 'mixed-pool', 'lse'.
        gmp_lambda: the initial regularization parameter for GMP.
        lse_r: the initial regularization parameter for LSE.

    Raises:
        RuntimeError: if pool_type is not a known pooling strategy.
    """
    # Lazy factories so only the requested layer is ever constructed.
    factories = {
        'gmp': lambda: GMP(lamb=gmp_lambda),
        'avg': lambda: nn.AdaptiveAvgPool2d(1),
        'max': lambda: nn.AdaptiveMaxPool2d(1),
        'mixed-pool': lambda: MixedPool(0.5),
        'lse': lambda: LSEPool(lse_r),
    }
    if pool_type not in factories:
        raise RuntimeError('{} is not a valid pooling'
                           ' strategy.'.format(pool_type))
    return factories[pool_type]()
def test_id_l071_id_l071_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : Test valid XML
    for keyref definition, field xpath='attribute::*' , selector contains
    *
    """
    # Generates bindings from the schema and validates the instance against them.
    assert_bindings(
        schema="msData/identityConstraint/idL071.xsd",
        instance="msData/identityConstraint/idL071.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def draw_transperency(image, mask, color_f, color_b):
    """Overlay a binary mask onto an image as an additive color layer.

    image (np.uint8): H x W x C image.
    mask (np.float32): range from 0 to 1; rounded to {0, 1} before use.
    color_f: additive color applied where mask == 1 (foreground).
    color_b: additive color applied where mask == 0 (background).

    Returns the saturating sum (cv2.add clamps at 255) of image and overlay.
    """
    mask = mask.round()
    alpha = np.zeros_like(image, dtype=np.uint8)
    alpha[mask == 1, :] = color_f
    alpha[mask == 0, :] = color_b
    # cv2.add saturates instead of wrapping around on uint8 overflow.
    image_alpha = cv2.add(image, alpha)
    return image_alpha
def split_in_pairs(s, padding="0"):
    """
    Takes a string and splits into an iterable of strings of two characters each.
    Made to break up a hex string into octets, so default is to pad an odd length
    string with a 0 in front. An alternative character may be specified as the
    second argument.
    """
    if not isinstance(padding, str) or len(padding) != 1:
        raise TypeError("Padding must be a single character.")
    # Left-pad odd-length input so every character lands in a pair.
    if len(s) % 2:
        s = padding + s
    chars = iter(s)
    # zip() on the same iterator consumes two characters per step.
    return (first + second for first, second in zip(chars, chars))
def make_slicer_query_with_totals_and_references(
        database,
        table,
        joins,
        dimensions,
        metrics,
        operations,
        filters,
        references,
        orders,
        share_dimensions=(),
):
    """
    Build the cross-product of queries needed to cover totals and references.

    :param database:
    :param table:
    :param joins:
    :param dimensions:
    :param metrics:
    :param operations:
    :param filters:
    :param references:
    :param orders:
    :param share_dimensions:
    :return: list of queries, each annotated with ``_totals`` and
        ``_references`` for later result-set joining.
    """
    """
    The following two loops will run over the spread of the two sets including a NULL value in each set:
    - reference group (WoW, MoM, etc.)
    - dimension with roll up/totals enabled (totals dimension)
    This will result in at least one query where the reference group and totals dimension is NULL, which shall be
    called base query. The base query will ALWAYS be present, even if there are zero reference groups or totals
    dimensions.
    For a concrete example, check the test case in :
    ```
    fireant.tests.queries.test_build_dimensions.QueryBuilderDimensionTotalsTests
    #test_build_query_with_totals_cat_dimension_with_references
    ```
    """
    totals_dimensions = find_totals_dimensions(dimensions, share_dimensions)
    totals_dimensions_and_none = [None] + totals_dimensions[::-1]
    reference_groups = find_and_group_references_for_dimensions(dimensions, references)
    reference_groups_and_none = [(None, None)] + list(reference_groups.items())
    queries = []
    for totals_dimension in totals_dimensions_and_none:
        (dimensions_with_totals, filters_with_totals) = adapt_for_totals_query(
            totals_dimension, dimensions, filters
        )
        # NOTE(review): the loop target ``references`` shadows the function
        # parameter; this is safe only because the parameter's last read is
        # the find_and_group_references_for_dimensions call above.
        for reference_parts, references in reference_groups_and_none:
            dimensions_with_ref, metrics_with_ref, filters_with_ref = adapt_for_reference_query(
                reference_parts,
                database,
                dimensions_with_totals,
                metrics,
                filters_with_totals,
                references,
            )
            query = make_slicer_query(
                database,
                table,
                joins,
                dimensions_with_ref,
                metrics_with_ref,
                filters_with_ref,
                orders,
            )
            # Add these to the query instance so when the data frames are joined together, the correct references and
            # totals can be applied when combining the separate result set from each query.
            query._totals = totals_dimension
            query._references = references
            queries.append(query)
    return queries
def corr_na(array1, array2, corr_method: str = 'spearmanr', **addl_kws):
    """Correlation method that tolerates missing values. Can take pearsonr or spearmanr.

    Args:
        array1: Vector of values
        array2: Vector of values
        corr_method: Which method to use, pearsonr or spearmanr.
        **addl_kws: Additional keyword args to pass to scipy.stats corr methods.

    Returns: R and p-value from correlation of 2 vectors, or (nan, nan)
        when fewer than 3 paired non-missing values exist.

    Raises:
        ValueError: if corr_method is not 'pearsonr' or 'spearmanr'.
    """
    # Explicit dispatch table replaces the previous eval(corr_method),
    # which would execute arbitrary code if corr_method came from
    # untrusted input.
    valid_methods = {'pearsonr': pearsonr, 'spearmanr': spearmanr}
    if corr_method not in valid_methods:
        # (message previously read "is a valid", which inverted its meaning)
        raise ValueError(
            'Method %s is not a valid correlation method, must be: %s'
            % (corr_method, ','.join(valid_methods))
        )
    # Keep only positions where both vectors have data.
    nonull = np.logical_and(not_na(array1), not_na(array2))
    if sum(nonull) > 2:
        return valid_methods[corr_method](array1[nonull], array2[nonull], **addl_kws)
    return np.nan, np.nan
def superCtx(*args, **kwargs):
    """
    Auto-generated stub for the Maya command ``superCtx``; the body is
    intentionally empty (the real implementation lives inside Maya).

    Flags:
    - attach : a (unicode) []
    - exists : ex (bool) []
    - image1 : i1 (unicode) []
    - image2 : i2 (unicode) []
    - image3 : i3 (unicode) []

    Derived from mel command `maya.cmds.superCtx`
    """
    pass
def directory_generator(dirname, trim=0):
    """
    yields a tuple of (relative filename, chunking function). The
    chunking function can be called to open and iterate over the
    contents of the filename.

    :param dirname: root directory to scan recursively.
    :param trim: number of leading characters stripped from each yielded
        path (e.g. len(dirname) to make paths relative).

    NOTE(review): this uses the callback-style ``walk`` (Python 2's
    ``os.path.walk`` signature) — confirm which ``walk`` is imported;
    ``os.walk`` takes no callback.
    """
    def gather(collect, dirname, fnames):
        # walk() callback: collect every non-directory entry.
        for fname in fnames:
            df = join(dirname, fname)
            if not isdir(df):
                collect.append(df)
    collect = list()
    walk(dirname, gather, collect)
    for fname in collect:
        yield fname[trim:], file_chunk(fname)
def analyze_member_access(name: str,
                          typ: Type,
                          node: Context,
                          is_lvalue: bool,
                          is_super: bool,
                          is_operator: bool,
                          builtin_type: Callable[[str], Instance],
                          not_ready_callback: Callable[[str, Context], None],
                          msg: MessageBuilder,
                          override_info: TypeInfo = None,
                          report_type: Type = None,
                          chk: 'mypy.checker.TypeChecker' = None) -> Type:
    """Analyse attribute access.

    This is a general operation that supports various different variations:
    1. lvalue or non-lvalue access (i.e. setter or getter access)
    2. supertype access (when using super(); is_super == True and
       override_info should refer to the supertype)

    Dispatches on the concrete class of ``typ``; several branches recurse
    with a fallback type (e.g. NoneTyp -> builtins.object).  Returns the
    type of the member, or AnyType after reporting an error.
    """
    # Errors are reported against report_type; default to the actual type.
    report_type = report_type or typ
    if isinstance(typ, Instance):
        if name == '__init__' and not is_super:
            # Accessing __init__ in statically typed code would compromise
            # type safety unless used via super().
            msg.fail(messages.CANNOT_ACCESS_INIT, node)
            return AnyType()
        # The base object has an instance type.
        info = typ.type
        if override_info:
            info = override_info
        # Debug aid: report occurrences of a specific attribute if requested.
        if (experiments.find_occurrences and
                info.name() == experiments.find_occurrences[0] and
                name == experiments.find_occurrences[1]):
            msg.note("Occurrence of '{}.{}'".format(*experiments.find_occurrences), node)
        # Look up the member. First look up the method dictionary.
        method = info.get_method(name)
        if method:
            if method.is_property:
                assert isinstance(method, OverloadedFuncDef)
                return analyze_var(name, method.items[0].var, typ, info, node, is_lvalue, msg,
                                   not_ready_callback)
            if is_lvalue:
                msg.cant_assign_to_method(node)
            typ = map_instance_to_supertype(typ, method.info)
            if name == '__new__':
                # __new__ is special and behaves like a static method -- don't strip
                # the first argument.
                signature = function_type(method, builtin_type('builtins.function'))
            else:
                signature = method_type_with_fallback(method, builtin_type('builtins.function'))
            return expand_type_by_instance(signature, typ)
        else:
            # Not a method.
            return analyze_member_var_access(name, typ, info, node,
                                             is_lvalue, is_super, builtin_type,
                                             not_ready_callback, msg,
                                             report_type=report_type, chk=chk)
    elif isinstance(typ, AnyType):
        # The base object has dynamic type.
        return AnyType()
    elif isinstance(typ, NoneTyp):
        if chk and chk.should_suppress_optional_error([typ]):
            return AnyType()
        # The only attribute NoneType has are those it inherits from object
        return analyze_member_access(name, builtin_type('builtins.object'), node, is_lvalue,
                                     is_super, is_operator, builtin_type, not_ready_callback, msg,
                                     report_type=report_type, chk=chk)
    elif isinstance(typ, UnionType):
        # Analyze each union item separately and simplify the union of results.
        msg.disable_type_names += 1
        results = [analyze_member_access(name, subtype, node, is_lvalue, is_super,
                                         is_operator, builtin_type, not_ready_callback, msg,
                                         chk=chk)
                   for subtype in typ.items]
        msg.disable_type_names -= 1
        return UnionType.make_simplified_union(results)
    elif isinstance(typ, TupleType):
        # Actually look up from the fallback instance type.
        return analyze_member_access(name, typ.fallback, node, is_lvalue, is_super,
                                     is_operator, builtin_type, not_ready_callback, msg, chk=chk)
    elif isinstance(typ, FunctionLike) and typ.is_type_obj():
        # Class attribute.
        # TODO super?
        ret_type = typ.items()[0].ret_type
        if isinstance(ret_type, TupleType):
            ret_type = ret_type.fallback
        if isinstance(ret_type, Instance):
            if not is_operator:
                # When Python sees an operator (eg `3 == 4`), it automatically translates that
                # into something like `int.__eq__(3, 4)` instead of `(3).__eq__(4)` as an
                # optimation.
                #
                # While it normally it doesn't matter which of the two versions are used, it
                # does cause inconsistencies when working with classes. For example, translating
                # `int == int` to `int.__eq__(int)` would not work since `int.__eq__` is meant to
                # compare two int _instances_. What we really want is `type(int).__eq__`, which
                # is meant to compare two types or classes.
                #
                # This check makes sure that when we encounter an operator, we skip looking up
                # the corresponding method in the current instance to avoid this edge case.
                # See https://github.com/python/mypy/pull/1787 for more info.
                result = analyze_class_attribute_access(ret_type, name, node, is_lvalue,
                                                        builtin_type, not_ready_callback, msg)
                if result:
                    return result
            # Look up from the 'type' type.
            return analyze_member_access(name, typ.fallback, node, is_lvalue, is_super,
                                         is_operator, builtin_type, not_ready_callback, msg,
                                         report_type=report_type, chk=chk)
        else:
            assert False, 'Unexpected type {}'.format(repr(ret_type))
    elif isinstance(typ, FunctionLike):
        # Look up from the 'function' type.
        return analyze_member_access(name, typ.fallback, node, is_lvalue, is_super,
                                     is_operator, builtin_type, not_ready_callback, msg,
                                     report_type=report_type, chk=chk)
    elif isinstance(typ, TypeVarType):
        # A type variable is treated as its upper bound for member lookup.
        return analyze_member_access(name, typ.upper_bound, node, is_lvalue, is_super,
                                     is_operator, builtin_type, not_ready_callback, msg,
                                     report_type=report_type, chk=chk)
    elif isinstance(typ, DeletedType):
        msg.deleted_as_rvalue(typ, node)
        return AnyType()
    elif isinstance(typ, TypeType):
        # Similar to FunctionLike + is_type_obj() above.
        item = None
        if isinstance(typ.item, Instance):
            item = typ.item
        elif isinstance(typ.item, TypeVarType):
            if isinstance(typ.item.upper_bound, Instance):
                item = typ.item.upper_bound
        if item and not is_operator:
            # See comment above for why operators are skipped
            result = analyze_class_attribute_access(item, name, node, is_lvalue,
                                                    builtin_type, not_ready_callback, msg)
            if result:
                return result
        fallback = builtin_type('builtins.type')
        return analyze_member_access(name, fallback, node, is_lvalue, is_super,
                                     is_operator, builtin_type, not_ready_callback, msg,
                                     report_type=report_type, chk=chk)
    # Fallthrough: no branch matched; report "has no attribute" unless suppressed.
    if chk and chk.should_suppress_optional_error([typ]):
        return AnyType()
    return msg.has_no_attr(report_type, name, node)
def close_gaps(in_path, out_path, threshold=0.1):
    """
    Interpolates the holes (no data value) in the input raster via SAGA GIS.

    Input:
        in_path: {string} path to the input raster with holes
        threshold: {float} Tension Threshold
    Output:
        out_path: {string} path to the generated raster with closed holes.
    """
    import subprocess  # local import; module-level imports are out of view
    # Argument list (shell=False) avoids shell injection through the paths,
    # unlike the previous os.system() string command.
    cmd = [
        "saga_cmd", "grid_tools", "7",
        "-INPUT", str(in_path),
        "-THRESHOLD", str(threshold),
        "-RESULT", str(out_path),
    ]
    # check=False mirrors os.system's ignore-failure behaviour.
    subprocess.run(cmd, check=False)
def has_global(node, name):
    """Return True when *node* has a ``globals`` attribute containing *name*."""
    sentinel = object()
    members = getattr(node, "globals", sentinel)
    # Equivalent to hasattr(node, "globals") and name in node.globals.
    return members is not sentinel and name in members
def make_generator_model(input_dim=100) -> tf.keras.Model:
    """Build the DCGAN generator model.

    Args:
        input_dim (int, optional): dimension of the latent input vector.
            Defaults to 100.

    Returns:
        tf.keras.Model: generator mapping (input_dim,) noise to a
        28x28x1 image in [-1, 1] (tanh output).
    """
    dense_size = (7, 7, 256)
    conv2d1_channel = 128
    conv2d2_channel = 64
    conv2d3_channel = 1
    model = tf.keras.Sequential()
    # Project the latent vector and reshape it into a 7x7x256 feature map.
    model.add(
        layers.Dense(
            dense_size[0] * dense_size[1] * dense_size[2],
            use_bias=False,
            input_shape=(input_dim,),
        )
    )
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape(dense_size))
    assert model.output_shape == (None, dense_size[0], dense_size[1], dense_size[2])
    # Two transpose-conv stages via the shared helper: stride 1 (keeps 7x7),
    # then stride 2 (upsamples to 14x14).
    _add_conv2d_transpose_layer(
        model,
        conv2d1_channel,
        (5, 5),
        (1, 1),
        (None, dense_size[0], dense_size[1], conv2d1_channel),
    )
    _add_conv2d_transpose_layer(
        model,
        conv2d2_channel,
        (5, 5),
        (2, 2),
        (None, dense_size[0] * 2, dense_size[1] * 2, conv2d2_channel),
    )
    # Final transpose conv upsamples to 28x28 and emits one channel via tanh.
    model.add(
        layers.Conv2DTranspose(
            conv2d3_channel,
            (5, 5),
            strides=(2, 2),
            padding="same",
            use_bias=False,
            activation="tanh",
        )
    )
    assert model.output_shape == (
        None,
        dense_size[0] * 4,
        dense_size[1] * 4,
        conv2d3_channel,
    )
    return model
def shutdown():
    """
    Tell the arkouda server to delete all objects and shut itself down.

    Sends the "shutdown" string over the module-level ZMQ socket, waits
    for the server's reply, disconnects, and flips the module-level
    ``connected`` flag.
    """
    global socket, pspStr, connected, verbose
    # send shutdown message to server
    message = "shutdown"
    if verbose: print("[Python] Sending request: %s" % message)
    socket.send_string(message)
    # Blocking receive of the server's acknowledgement.
    message = socket.recv_string()
    if verbose: print("[Python] Received response: %s" % message)
    socket.disconnect(pspStr)
    print(message)
    connected = False
def deploy_gradle(app, deltas={}):
    """Deploy a Java application using Gradle.

    :param app: application name (a directory under APP_ROOT).
    :param deltas: worker-count deltas forwarded to spawn_app.

    NOTE(review): the mutable default ``deltas={}`` is risky if it is ever
    mutated downstream; kept as-is to preserve the public signature.
    """
    java_path = join(ENV_ROOT, app)
    build_path = join(APP_ROOT, app, 'build')
    env_file = join(APP_ROOT, app, 'ENV')
    # Put the app's bin directories ahead of the inherited PATH.
    env = {
        'VIRTUAL_ENV': java_path,
        "PATH": ':'.join([join(java_path, "bin"), join(app, ".bin"), environ['PATH']])
    }
    if exists(env_file):
        # Overlay user-provided settings from the ENV file.
        env.update(parse_settings(env_file, env))
    if not exists(java_path):
        makedirs(java_path)
    if not exists(build_path):
        # First deploy: plain build.
        echo("-----> Building Java Application")
        call('gradle build', cwd=join(APP_ROOT, app), env=env, shell=True)
    else:
        # Redeploy: clean previous build artifacts first.
        echo("-----> Removing previous builds")
        echo("-----> Rebuilding Java Application")
        call('gradle clean build', cwd=join(APP_ROOT, app), env=env, shell=True)
    return spawn_app(app, deltas)
def find_badge_by_slug(slug: str) -> Optional[Badge]:
    """Return the badge with that slug, or `None` if not found."""
    db_badge = (
        db.session
        .query(DbBadge)
        .filter_by(slug=slug)
        .one_or_none()
    )
    return None if db_badge is None else _db_entity_to_badge(db_badge)
def start_bot(tasks):
    """Run all platform tasks to completion on the module-level event loop.

    :param tasks: iterable of awaitables, one per platform.

    NOTE(review): the ``loop=`` argument to ``asyncio.gather`` was
    deprecated in Python 3.8 and removed in 3.10 — confirm the targeted
    Python version before upgrading.
    """
    platforms=asyncio.gather(*tasks,loop=loop)
    loop.run_until_complete(platforms)
def reslice_ops(ops, aligned_op_slice_sizes, op_reg_manager):
    """Reslices ops according to aligned sizes.

    Each op whose current slice sizes are non-empty and differ from the
    aligned sizes is re-sliced through the manager.

    Args:
      ops: List of tf.Operation to slice.
      aligned_op_slice_sizes: List of integer slice sizes.
      op_reg_manager: OpRegularizerManager to keep track of slicing.
    """
    for op in ops:
        current_sizes = [
            op_slice.slice.size for op_slice in op_reg_manager.get_op_slices(op)
        ]
        if current_sizes and current_sizes != aligned_op_slice_sizes:
            op_reg_manager.slice_op(op, aligned_op_slice_sizes)
def extract_img_features(
    input_path,
    input_type,
    output_path,
    img=None,
    img_meta=None,
    feature_mask_shape="spot",
):
    """
    Extract features from image. Works with IF or HE image from Visium tif files.
    For block feature, a square will be drawn around each spot. Since it is bigger than
    the spot itself, it is more suitable to extract texture features.
    For Spot feature, only area in the actual sequencing spot will be uses.
    It is more suitable to extract intensity features.

    Parameters
    ----------
    input_path : str
        input folder containing all necessary files.
    input_type : str
        input image type, select from {'if','he'}.
    output_path : str
        output folder path.
    img : None or np.array, optional
        alternative input for image, will override input_path.
    img_meta : None or np.array, optional
        alternative input for image metadata, will override input_path.
    feature_mask_shape : {'spot', 'block'}
        type of feature extracted.
    """
    # NOTE(review): these two variable names are swapped relative to their
    # contents — intensity_fn holds the *texture* csv path and texture_fn
    # the *intensity* path. The to_csv calls at the bottom compensate, so
    # the files on disk end up correct; rename with care.
    intensity_fn = os.path.join(
        os.path.abspath(output_path),
        "{}_level_texture_features.csv".format(feature_mask_shape)
    )
    texture_fn = os.path.join(
        os.path.abspath(output_path),
        "{}_level_intensity_features.csv".format(feature_mask_shape)
    )
    if (os.path.exists(intensity_fn)) == (os.path.exists(texture_fn)) == True:
        print('Features are already extracted.')
        return
    if img_meta is None:
        img_meta = pd.read_csv(
            os.path.join(input_path,"Spot_metadata.csv"), index_col=0)
    if img is None:
        # Pick the first tif in the input folder.
        img_tif = [x for x in os.listdir(input_path) if "tif" in x][0]
        img_tif = os.path.join(input_path, img_tif)
        if input_type == "if":
            # the indexing is a workaround for the strange Visium if image channels.
            img = io.imread(img_tif)
            img = img_as_float32(img)
            img = (255 * img).astype("uint8")
        else:
            img = io.imread(img_tif)
            # normalize image with color deconv
            print('Normalizing image...')
            img = separate_stains(img, hdx_from_rgb)
            img = minmax_scale(img.reshape(-1, 3)).reshape(img.shape)
            img = np.clip(img, 0, 1)
            img = exposure.equalize_adapthist(img, clip_limit=0.01)
            img = (255 * img).astype("uint8")
    # Hard coded type of Haralick features and Angles for searching for neighboring pixels
    # hard coded number of angles to be 4, meaning horizontal, vertical and two diagonal directions.
    # extracting block shaped features
    if feature_mask_shape == "block":
        # Half block size is derived from the median spacing between spots.
        tmp = img_meta.sort_values(["Row", "Col"])
        block_y = int(np.median(tmp.Y.values[2:-1] - tmp.Y.values[1:-2]) // 2)
        tmp = img_meta.sort_values(["Col", "Row"])
        block_x = int(np.median(tmp.X.values[2:-1] - tmp.X.values[1:-2]) // 2)
        block_r = min(block_x, block_y)
        block_x = block_y = block_r
    print("Prossessing {}".format(input_path))
    feature_set = [
        "contrast",
        "dissimilarity",
        "homogeneity",
        "ASM",
        "energy",
        "correlation",
    ]
    text_features = []
    intensity_features = []
    for i in range(img_meta.shape[0]):
        if (i + 1) % 100 == 0:
            print("Processing {} spot out of {} spots".format(i + 1, img_meta.shape[0]))
        row = img_meta.iloc[i]
        x, y, r = row[["X", "Y", "Spot_radius"]].astype(int)
        if feature_mask_shape == "spot":
            spot_img = img[x - r : x + r + 1, y - r : y + r + 1]
            spot_mask = morphology.disk(r)
            # only use the spot, not the bbox
            spot_img = np.einsum("ij,ijk->ijk", spot_mask, spot_img)
        else:
            spot_img = img[x - block_x : x + block_x + 1, y - block_y : y + block_y + 1]
            spot_mask = np.ones_like(spot_img[:, :, 0], dtype="bool")
        # extract texture features (per-channel GLCM + Haralick props)
        ith_texture_f = []
        for c in range(img.shape[2]):
            glcm = greycomatrix(
                spot_img[:, :, c],
                distances=[1],
                # Angles are arranged in a counter clockwise manner, in radian.
                angles=[0, np.pi / 4, np.pi / 2, 3 * np.pi / 4],
                levels=256,
                symmetric=True,
                normed=False,
            )
            # Drop gray level 0 (masked-out pixels) before normalizing.
            glcm = glcm[1:, 1:]
            glcm = glcm / np.sum(glcm, axis=(0, 1))
            for feature_name in feature_set:
                ith_texture_f += greycoprops(glcm, feature_name)[0].tolist()
        # The first 6 features are intensity features, and the rest are Haralicks.
        text_features.append(ith_texture_f)
        # extract intensity features (pixel-value quantiles inside the mask)
        int_low = 0.2
        int_high = 0.8
        int_step = 0.1
        q_bins = np.arange(int_low, int_high, int_step)
        ith_int_f = []
        for c in range(img.shape[2]):
            for t in q_bins:
                ith_int_f.append(np.quantile(spot_img[:, :, c][spot_mask == True], t))
        intensity_features.append(ith_int_f)
    # Naming the features. f stands for channels, A stands for angles.
    # construct texture feature table
    channels = ["f" + str(i) for i in range(img.shape[2])]
    col_names = product(channels, feature_set, ["A1", "A2", "A3", "A4"])
    col_names = ["_".join(x) for x in col_names]
    text_features = pd.DataFrame(text_features, index=img_meta.index, columns=col_names)
    # construct intensity feature table
    intensity_features = pd.DataFrame(
        intensity_features,
        index=img_meta.index,
        columns=[
            "_".join(x) for x in product(channels, ["{:.1f}".format(x) for x in q_bins])
        ],
    )
    text_features.to_csv(intensity_fn)
    intensity_features.to_csv(texture_fn)
    return text_features, intensity_features
def image_generator(files, batch_size):
    """
    Generate endless random batches of (images, one-hot labels) for training
    instead of loading all images into memory.

    :param files: sequence of file paths; each path must contain "pos" or "neg".
    :param batch_size: number of samples per batch.
    :return: generator yielding (batch_x, batch_y) numpy arrays, where
        batch_y rows are [1, 0] for "pos" files and [0, 1] for "neg" files.
    :raises ValueError: if a sampled path contains neither "pos" nor "neg".
    """
    while True:
        # Select files (paths/indices) for the batch, sampled with replacement.
        batch_paths = np.random.choice(a=files,
                                       size=batch_size)
        batch_input = []
        batch_output = []
        # Read in each input, perform preprocessing and get labels
        for input_path in batch_paths:
            sample = load_gif_data(input_path)
            if "pos" in input_path:  # if file name contains pos
                label = np.array([1, 0])  # label
            elif "neg" in input_path:  # if file name contains neg
                label = np.array([0, 1])  # label
            else:
                # Bug fix: previously an unlabeled path reused the previous
                # iteration's label (or raised NameError on the first one).
                raise ValueError(
                    "Cannot infer label from path (expected 'pos' or 'neg'): "
                    + str(input_path)
                )
            batch_input += [sample]
            batch_output += [label]
        # Return a tuple of (input,output) to feed the network
        batch_x = np.array(batch_input)
        batch_y = np.array(batch_output)
        yield (batch_x, batch_y)
def validate_color(color, default, color_type):
    """Validate a color against known PIL values.

    Returns the color unchanged when PIL accepts it; otherwise logs a
    warning and returns the supplied default.

    Keyword arguments:
    color: color to test.
    default: default color string value if color is invalid.
    color_type: string name for color type, used for alerting users of defaults.
    """
    try:
        # Parsing succeeds exactly when the specifier is valid; the parsed
        # value itself is not needed.
        ImageColor.getcolor(color, 'RGB')
    except ValueError:
        logging.warning('"%s" is not a valid color specifier. Defaulting to "%s" for %s color.',color,default,color_type)
        return default
    return color
def save_dreams(basedir, agent, data, embed, image_pred, obs_type='lidar', summary_length=5, skip_frames=10):
    """ Perform dreaming and save the imagined sequences as images.

    `basedir`: base log dir where the images will be stored
    `agent`: instance of dreamer
    `embed`: tensor of embeds, shape (B, E) where B is the episode length
    `data`: dictionary of observed data, it contains observation and camera images
    `image_pred`: distribution of predicted reconstructions
    `obs_type`: observation type, either lidar or lidar_occupancy
    `summary_length`: number of context steps observed before imagination starts
    `skip_frames`: save every `skip_frames`-th frame of each sequence
    """
    imagedir = basedir / "images"
    imagedir.mkdir(parents=True, exist_ok=True)
    if obs_type == 'lidar':
        # +0.5 undoes the [-0.5, 0.5] normalization used during training.
        truth = data['lidar'][:1] + 0.5
        recon = image_pred.mode()[:1]
        # Observe the first `summary_length` steps, then imagine forward.
        init, _ = agent._dynamics.observe(embed[:1, :summary_length],
                                          data['action'][:1, :summary_length])
        init = {k: v[:, -1] for k, v in init.items()}
        prior = agent._dynamics.imagine(data['action'][:1, summary_length:], init)
        openl = agent._decode(agent._dynamics.get_feat(prior)).mode()
        model = tf.concat([recon[:, :summary_length] + 0.5, openl + 0.5], 1)
        truth_img = tools.lidar_to_image(truth)
        model_img = tools.lidar_to_image(model)
    elif obs_type == 'lidar_occupancy':
        truth_img = data['lidar_occupancy'][:1]
        recon = image_pred.mode()[:1]
        recon = tf.cast(recon, tf.float32)  # concatenation requires same type
        init, _ = agent._dynamics.observe(embed[:1, :summary_length],
                                          data['action'][:1, :summary_length])
        init = {k: v[:, -1] for k, v in init.items()}
        prior = agent._dynamics.imagine(data['action'][:1, summary_length:], init)
        openl = agent._decode(agent._dynamics.get_feat(prior)).mode()
        openl = tf.cast(openl, tf.float32)
        model_img = tf.concat([recon[:, :summary_length], openl],
                              1)  # note: recon and open_l is already 0 or 1, no need scaling
    else:
        raise NotImplementedError(f"save dreams not implemented for {obs_type}")
    # Timestamp keeps filenames unique across invocations.
    timestamp = time.time()
    plt.box(False)
    plt.axis(False)
    plt.ion()
    for imgs, prefix in zip([data['image'], truth_img, model_img], ["camera", "true", "recon"]):
        for ep in range(imgs.shape[0]):
            for t in range(0, imgs.shape[1], skip_frames):
                # plot black/white without borders
                plt.imshow(imgs[ep, t, :, :, :], cmap='binary')
                plt.savefig(f"{imagedir}/frame_{timestamp}_{obs_type}_{prefix}_{ep}_{t}.png",
                            bbox_inches='tight', transparent=True, pad_inches=0)
def d_xx_yy_tt(psi):
    """Return weighted second derivatives of the field psi via FFT.

    Applies the precomputed spectral operator LAPL (module-level global)
    in Fourier space: ifft2(LAPL * fft2(psi)).

    Parameters
    --------------
    psi : array of complex64 for the field

    Returns
    --------------
    cxx psi_xx + cyy psi_yy + ctt psi_tt : second derivatives with respect to x
    """
    # this function is to remove
    global LAPL
    return fft.ifft2(LAPL * fft.fft2(psi))
def test_classes(classes, expected_classes):
    """Test that classes are properly parsed.

    1. Create an action parser for a dictionary with specific classes.
    2. Parse classes.
    3. Check the parsed classes.
    """
    actual_classes = ActionParser(data={"class": classes}, parser=JSONParser()).parse_classes()
    assert actual_classes == expected_classes, "Wrong classes"
def negate_objective(objective):
    """Take the negative of the given objective (converts a gain into a loss and vice versa).

    Iterables are negated element-wise (recursively), producing a list.
    """
    if isinstance(objective, Iterable):
        return [negate_objective(element) for element in objective]
    return -objective
def process_vm_size(file_name: str) -> Any:
    """
    Extract VMs instance specification from a JSON file.

    :file_name (str) File name
    Return VMs specification object
    """
    current_app.logger.info(f'Processing VM Size {file_name}...')
    # Bug fix: use a context manager so the file handle is always closed
    # (the previous code opened the file and never closed it).
    with open(file_name) as file:
        return json.load(file)
def gen_key(password, salt, dkLen=BLOCKSIZE):
    """
    Implement PBKDF2 to make short passwords match the BLOCKSIZE.

    Parameters
    ---------
    password str
    salt str
    dkLen int
        Desired derived-key length in bytes (defaults to BLOCKSIZE).

    Returns
    -------
    - str
    """
    # Bug fix: honor the caller-supplied dkLen; the previous code always
    # passed BLOCKSIZE, silently ignoring the parameter.
    return KDF.PBKDF2(password, salt, dkLen=dkLen)
def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    if isinstance(value, six.string_types):
        # BytesList only accepts bytes; encode text as UTF-8 first.
        value = six.binary_type(value, encoding='utf-8')
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def test_success(database):
    """ BusinessFundsIndicator must contain one of the following values: REC or NON. Case doesn't matter """
    # One upper-case and one lower-case indicator; both should validate cleanly.
    det_award = DetachedAwardFinancialAssistanceFactory(business_funds_indicator="REC")
    det_award_2 = DetachedAwardFinancialAssistanceFactory(business_funds_indicator="non")
    errors = number_of_errors(_FILE, database, models=[det_award, det_award_2])
    assert errors == 0
def parse_plot_set(plot_set_string):
    """
    Given one of the string arguments to the --plot-sets option, parse out a
    data structure representing which conditions ought to be compared against
    each other, and what those comparison plots/tables should be called.

    The syntax of a plot set is [title:]condition[,condition[,condition...]].
    The first condition is the comparison baseline, when applicable.

    Returns a tuple of a plot set title, or None if unspecified, and a list of
    condition names.
    """
    # partition() splits on the first colon only; sep is '' when absent.
    title, sep, conditions = plot_set_string.partition(':')
    if not sep:
        # No title prefix given; the whole string is the condition list.
        return (None, plot_set_string.split(','))
    return (title, conditions.split(','))
def check_types_excel(row: tuple) -> bool:
    """Returns true if row from excel file has correct types.

    Expected field types by index:
      1: date (pd.Timestamp or str)
      2, 3: start/end times — both datetime.time or both str
      4: str
      6: str or int
      7: str, int or float

    NOTE(review): ``row[4:5]`` covers index 4 only — if index 5 was also
    meant to be checked, the slice should probably be ``row[4:6]``;
    confirm against the spreadsheet's column layout.
    """
    if not isinstance(row[1], (pd.Timestamp, str)):
        return False
    if not ((isinstance(row[2], dt.time) and isinstance(row[3], dt.time)) or
            (isinstance(row[2], str) and isinstance(row[3], str))):
        return False
    if not all((isinstance(x, str) for x in row[4:5])):
        return False
    if not isinstance(row[6], (str, int)):
        return False
    if not isinstance(row[7], (str, int, float)):
        # 3.27, 3.27a and 137 should all be supported
        return False
    return True
def add(num1, num2):
    """Return the sum of two numbers.

    >>> add(2,4)
    6
    """
    total = num1 + num2
    return total
def is_palindrome1(str):
    """
    Check whether the string reads the same forwards and backwards by
    comparing it with its negative-step slice reversal.

    (The parameter shadows the builtin ``str``; kept for API compatibility.)
    """
    reversed_form = str[::-1]
    return str == reversed_form
def number_generetor(view, form):
    """Payment number generator (used as the default).

    Builds an identifier of the form ``YYYYMMDD-xxxxxxxx`` where the hex part
    is the first 32-bit field of a fresh UUID4.
    """
    # uuid.UUID.get_fields() only exists on Python 2; Python 3 exposes .fields.
    if is_py2:
        uuid_fields = uuid4().get_fields()
    else:
        uuid_fields = uuid4().fields
    return u'{:%Y%m%d}-{:08x}'.format(datetime.now(), uuid_fields[0])
def get_underlay_info():
    """
    Accessor for the module-level ``underlay_info`` object.

    :return: the shared underlay info structure (defined elsewhere in this module)
    """
    return underlay_info
async def get_guild_roles(id_: int):
    """
    Get the roles of a guild.

    :param id_: Guild ID
    :return: List of role dicts, or ``status.HTTP_404_NOT_FOUND`` (an int)
        when the guild does not exist.
    """
    guild = await router.bot.rest.fetch_guild(id_)
    if guild is None:
        # NOTE(review): this returns the integer status code as the response
        # body instead of raising an HTTP error -- confirm that is intended.
        return status.HTTP_404_NOT_FOUND
    roles = await guild.fetch_roles()
    return [to_dict(role) for role in roles]
def prior_min_field(field_name, field_value):
    """
    Derive the "min" companion field for a prior.

    :param field_name: prior name (field name initial)
    :param field_value: field initial properties (left unmodified)
    :return: name of the min field, updated field properties
    """
    props = dict(field_value)  # shallow copy so the caller's dict is untouched
    props['label'] = 'Min'
    props['required'] = False
    return field_name + '_min', props
def checkpoint_save_config():
    """Fixture to create a config for saving attributes of a detector."""
    saved = [
        "dummy_dict",
        "dummy_list",
        "dummy_tuple",
        "dummy_tensor",
        "dummy_val",
    ]
    return {
        "test_id": "Dummy_test",
        "saved_attributes": {"FeatureExtraction": saved},
        "save_attributes": True,
        "attributes": {},
        "save_elementwise": True,
    }
def matrix_sum_power(A, T):
    """Take the sum of the powers of a matrix, i.e.,
    sum_{t=1}^T A^t.

    :param A: Matrix to be powered
    :type A: np.ndarray
    :param T: Maximum order for the matrix power
    :type T: int
    :return: Powered matrix
    :rtype: np.ndarray
    """
    n = A.shape[0]
    current_power = np.eye(n)        # holds A^t, starting from A^0
    running_sum = np.zeros((n, n))   # accumulates A^1 + ... + A^t
    for _ in range(T):
        current_power = A @ current_power
        running_sum = running_sum + current_power
    return running_sum
def mean_zero_unit_variance(arr, mean_vector=None, std_vector=None, samples_in='row'):
    """
    Normalize input data to have zero mean and unit variance.
    Return the normalized data, the mean, and the calculated standard
    deviation which was used to normalize the data
    [normalized, meanvec, stddev] = mean_zero_unit_variance(data)
    or
    [normalized, meanvec, stddev] = mean_zero_unit_variance(data, mean_vector=provided_mean_vector)
    etc.

    :param arr: 2-D data array.
    :param mean_vector: optional precomputed mean, broadcastable against arr.
    :param std_vector: optional precomputed std, broadcastable against arr
        (entries below 1e-6 are replaced by 1 in place).
    :param samples_in: 'row' if each row is a sample, 'col' for columns.
    """
    samplesIn = 1 if samples_in == 'col' else 0
    dimsIn = int(not samplesIn)
    nDims = arr.shape[dimsIn]
    theshape = [1, 1]
    theshape[dimsIn] = nDims
    # Bug fix: "if not mean_vector" raises ValueError when a multi-element
    # numpy array is supplied (ambiguous truth value); test against None.
    if mean_vector is None:
        mean_vector = arr.mean(axis=samplesIn).reshape(theshape)
    if std_vector is None:
        std_vector = arr.std(axis=samplesIn).reshape(theshape)
    # If you have a row with absolutely no information, you will divide by zero. Hence...
    std_vector[std_vector < 1e-6] = 1
    norma = (arr - mean_vector) / std_vector
    return norma, mean_vector, std_vector
def soft_precision(scores: torch.FloatTensor,
                   mask: torch.FloatTensor) -> torch.FloatTensor:
    """
    Helper function for computing soft precision in batch.
    # Parameters
    scores : torch.FloatTensor
        Tensor of scores with shape: (num_refs, num_cands, max_ref_len, max_cand_len)
    mask : torch.FloatTensor
        Mask for the candidate tensor with shape: (num_cands, max_cand_len)
    """
    # Best-matching reference token score for every candidate token.
    best_per_token = scores.max(dim=-2).values
    # Zero out padding positions before averaging over candidate tokens.
    masked = best_per_token * mask.unsqueeze(dim=0)
    token_counts = mask.sum(dim=-1).view(1, -1)
    return masked.sum(dim=-1) / token_counts
def fit_sir(times, T_real, gamma, population, store, pathtoloc, tfmt='%Y-%m-%d', method_solver='DOP853', verbose=True, \
        b_scale=1):
    """
    Fit the dynamics of the SIR starting from real data contained in `pathtocssegi`.
    The initial condition is taken from the real data.
    The method assumes that in the `store` at the indicated `path`, there are entries
    in the format %Y-%m-%d that described the infectivity matrices
    for the times `times[:-1]`.
    `population` is the vector with the population per community.

    :param times: sequence of dates; one infectivity matrix per interval.
    :param T_real: observed per-community infected fractions, indexed by time.
    :param gamma: SIR recovery rate.
    :param b_scale: initial guess for the infectivity scale (root bracketing).
    OUTPUT:
    * df_S, df_I: per-community S and I trajectories (one row per day)
    * df_fit: fitted scale and total infected fraction per input time
    For the output the dumping interval is one day.
    """
    # initializations
    nt = len(times)
    t = times[0]
    B = read_df(t, tfmt, store, pathtoloc).to_numpy()
    N = B.shape[0]
    # Population-weighted total infected fraction observed at each time.
    Y_real = np.einsum('ta,a->t', T_real, population) / np.sum(population)
    X = np.zeros((2, N), dtype=np.float_)
    I = T_real[0]
    S = 1 - I
    X = sir_SI_to_X(S, I)
    y = get_sir_omega_X(X, population)
    ts = [t]
    Xs = [X.reshape(2,N)]
    Ys = [y]
    b_scales = []
    blo = 0.
    # print("nt = ", nt)
    for i in range(1, nt):
        if verbose:
            print(f'Integrating day {t}')
        mykey = Path(pathtoloc) / t.strftime(tfmt)
        mykey = str(mykey)
        # Reuse the previous infectivity matrix when no entry exists for t.
        if mykey in store.keys():
            B = read_df(t, tfmt, store, pathtoloc).to_numpy()
        elif verbose:
            print("Infectivity matrix not updated!")
        tnew = times[i]
        dt = int((tnew - t).days)
        ypred = Y_real[i]
        # root finding method
        func_root = lambda b: get_sir_omega_X(compute_sir_X(X, dt, b*B, gamma, method_solver), \
                population) - ypred
        # initial bracketing: grow bhi geometrically until func_root changes sign
        bhi = b_scale
        fscale = 3.
        for k in range(1,10):
            f = func_root(bhi)
            if f > 0:
                break
            else:
                bhi *= fscale
        if f < 0:
            raise ValueError("Problem in bracketing!")
        # find the root
        sol = scipy.optimize.root_scalar(func_root, bracket=(blo, bhi), method='brentq', \
                options={'maxiter': 100})
        if not (sol.converged):
            raise ValueError("root finding failed!")
        b_scale = sol.root
        # compute next state with optimal scale, dumping one state per day
        t_eval = np.arange(dt+1)
        Xnews = compute_sir_X(X, dt, b_scale*B, gamma, method_solver, t_eval=t_eval)
        Xnew = Xnews[-1]
        y = get_sir_omega_X(Xnew,population)
        print(f"b = {b_scale}, y = {y}, ypred = {ypred}, y-ypred = {y-ypred}")
        # dump
        # data.append(Xnew.reshape(2,N))
        Xs += [Xnew.reshape(2,N) for Xnew in Xnews]
        ts += [t + datetime.timedelta(days=int(dt)) for dt in t_eval[1:]]
        Ys.append(y)
        b_scales.append(b_scale)
        # update
        t = tnew
        X = Xnew
    b_scales.append(None)  # B has ndays-1 entries
    print("Fitting complete")
    # prepare export of results
    S = np.array([X[0] for X in Xs])
    I = np.array([X[1] for X in Xs])
    clusters = np.arange(N, dtype=np.uint)
    df_S = pd.DataFrame(data=S, index=ts, columns=clusters)
    df_I = pd.DataFrame(data=I, index=ts, columns=clusters)
    df_fit = pd.DataFrame(data=np.array([b_scales, Ys]).T, index=times, columns=["scale", "frac_infected_tot"])
    return df_S, df_I, df_fit
def prepare_es(app, db):
    """Prepare ES indices (currently a no-op placeholder)."""
    return None
def panelist_debuts_by_year(database_connection: mysql.connector.connect
                            ) -> Dict:
    """Returns an OrderedDict mapping each show year to the list of
    panelists whose debut falls in that year."""
    show_years = retrieve_show_years(database_connection)
    panelists = retrieve_panelists_first_shows(database_connection)
    # Seed every year with an empty list so years without debuts still appear.
    years_debut = OrderedDict()
    for year in show_years:
        years_debut[year] = []
    # Bucket each panelist under the year of their first appearance.
    for panelist in panelists:
        panelist_info = panelists[panelist]
        years_debut[panelist_info["year"]].append(panelist_info)
    return years_debut
def _stirring_conditions_html(stirring: reaction_pb2.StirringConditions) -> str:
    """Generates an HTML-ready description of stirring conditions.
    Args:
        stirring: StirringConditions message.
    Returns:
        String description of the stirring conditions; empty for type NONE.
    """
    if stirring.type == stirring.NONE:
        return ""
    txt = ""
    if stirring.type != stirring.UNSPECIFIED:
        # Dict-dispatch on the enum value; CUSTOM falls back to free-text details.
        txt += {
            stirring.CUSTOM: stirring.details,
            stirring.STIR_BAR: "stir bar",
            stirring.OVERHEAD_MIXER: "overhead mixer",
            stirring.AGITATION: "agitation",
        }[stirring.type]
    # Append the stirring rate when one was recorded.
    if stirring.rate.rpm:
        txt += f" ({stirring.rate.rpm} rpm)"
    return txt
def load_action_plugins():
    """
    Return a list of all registered action plugins.

    Discovery is delegated to ``get_plugins``; each discovered plugin's name
    is logged at debug level.
    """
    logger.debug("Loading action plugins")
    plugins = get_plugins(action, ActionPlugin)
    if len(plugins) > 0:
        logger.info("Discovered {n} action plugins:".format(n=len(plugins)))
        for ap in plugins:
            logger.debug(" - {ap}".format(ap=ap.PLUGIN_NAME))
    return plugins
def read_file_in_root_directory(*names, **kwargs):
    """Read a file located relative to this module and return its stripped text.

    :param names: path components joined onto this file's directory.
    :param kwargs: optional ``encoding`` (defaults to ``'utf-8'``).
    :return: file contents with surrounding whitespace stripped.
    """
    # Bug fix: the original never closed the file handle; a context manager
    # releases it deterministically.
    with io.open(
        os.path.join(os.path.dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf-8')
    ) as handle:
        return handle.read().strip()
def set_effective_property_value_for_node(
    nodeId: dom.NodeId, propertyName: str, value: str
) -> dict:
    """Build the CDP command that finds a rule with the given active property
    for the given node and sets the new value for this property.

    Parameters
    ----------
    nodeId: dom.NodeId
        The element id for which to set property.
    propertyName: str
    value: str
    """
    params = {
        "nodeId": int(nodeId),
        "propertyName": propertyName,
        "value": value,
    }
    return {"method": "CSS.setEffectivePropertyValueForNode", "params": params}
def main(args, unit_test=False, path=''):
    """ Builds the QA HTML files.

    :param args: parsed CLI arguments; ``args.type`` selects 'MF' (master
        frames), 'exp' (exposures) or 'all'.
    :param unit_test: unused here; kept for the test harness.
    :param path: str, optional. Mainly for running the unit test.
    """
    from pypeit.core import qa
    # Flags selecting which HTML generators to run
    flg_MF, flg_exp = False, False
    if args.type == 'MF':
        flg_MF = True
    elif args.type == 'exp':
        flg_exp = True
    elif args.type == 'all':
        flg_exp, flg_MF = True, True
    # Master Frame
    if flg_MF:
        qa.gen_mf_html(args.pypeit_file, args.qapath)
    # Exposures
    if flg_exp:
        qa.gen_exp_html()
async def _write_nlu_to_file(
    export_nlu_path: Text, events: List[Dict[Text, Any]]
) -> None:
    """Write the nlu data of the sender_id to the file paths.

    Existing training data at ``export_nlu_path`` is loaded (best effort) and
    merged with the messages extracted from ``events`` before writing.
    """
    from rasa.nlu.training_data import TrainingData
    msgs = _collect_messages(events)
    msgs = _filter_messages(msgs)
    # noinspection PyBroadException
    try:
        previous_examples = loading.load_data(export_nlu_path)
    except Exception as e:
        logger.debug(
            "An exception occurred while trying to load the NLU data. {}".format(str(e))
        )
        # No previous file exists, use empty training data as replacement.
        previous_examples = TrainingData()
    nlu_data = previous_examples.merge(TrainingData(msgs))
    # need to guess the format of the file before opening it to avoid a read
    # in a write
    nlu_format = _get_nlu_target_format(export_nlu_path)
    if nlu_format == MARKDOWN:
        stringified_training_data = nlu_data.nlu_as_markdown()
    else:
        stringified_training_data = nlu_data.nlu_as_json()
    io_utils.write_text_file(stringified_training_data, export_nlu_path)
def texas_job_centers(num_polygons = 4000):
    """ Choropleth of the percent of workers who work in their place of residence for Austin, Dallas, Houston.

    :param num_polygons: number of block groups nearest each city center to plot.
    Saves one HTML map per city under Figures/Suburbs/.
    """
    # Get block geodata for all of Texas and calculate features
    block_data = boundaries.BlockBoundaries(['X08_COMMUTING', 'X01_AGE_AND_SEX'], cities = None)
    # Female and male workers working in their place of residence
    block_data.data['local_workers'] = block_data.data['B08008e3'] + block_data.data['B08008e8']
    # Total number of male and female workers in the block group
    block_data.data['total_workers'] = block_data.data['B08008e2'] + block_data.data['B08008e7']
    block_data.data['local_workers_pct'] = 100*block_data.data['local_workers'].divide(block_data.data['total_workers']).fillna(0)
    # Get sindex and loop through blocks around austin/dallas/houston
    spatial_index = block_data.sindex
    for name, zoning_input in zip(['Austin', 'Dallas', 'Houston'],
                                  [zoning.austin_inputs, zoning.dallas_inputs, zoning.houston_inputs]):
        # Get basemap
        basemap = folium.Map([zoning_input.lat, zoning_input.long], zoom_start=10)
        # Query and find nearest neighbors, subset
        nearest_index = list(spatial_index.nearest((zoning_input.long, zoning_input.lat), num_results = num_polygons))
        city_data = block_data.data.iloc[nearest_index]
        # Simplify geometries to keep the HTML output size manageable
        city_data['geometry'] = city_data['geometry'].simplify(tolerance = 0.001)
        # Graph
        choropleth.continuous_choropleth(city_data, factor = 'local_workers_pct',
                                         layer_name='Percent of Workers Working in Place of Residence',
                                         colors = ['white', 'green', 'blue'],
                                         quants = None,
                                         method = 'linear',
                                         show = False, basemap = basemap)
        # Save
        folium.TileLayer('cartodbdark_matter').add_to(basemap)
        folium.LayerControl().add_to(basemap)
        basemap.save('Figures/Suburbs/{}_job_choropleth.html'.format(name))
def classroom_page(request,unique_id):
    """
    Classroom Setting Page.

    GET renders the settings form; POST updates the classroom and redirects
    to its subjects page (invalid POST re-renders with the bound form).
    """
    classroom = get_object_or_404(Classroom,unique_id=unique_id)
    pending_members = classroom.pending_members.all()
    admins = classroom.special_permissions.all()
    members = admins | classroom.members.all()
    # Whether the requesting user has admin rights on this classroom
    is_admin = classroom.special_permissions.filter(username = request.user.username).exists()
    #classroom_update
    if request.method=="POST":
        form = CreateclassForm(request.POST,request.FILES,instance=classroom)
        if form.is_valid():
            form.save()
            return redirect(reverse('subjects',kwargs={'unique_id':classroom.unique_id}))
    else:
        form = CreateclassForm(instance=classroom)
    params={
        'members':members.distinct(),
        'admins':admins,
        'pending_members':pending_members,
        'classroom':classroom,
        'is_admin':is_admin,
        'form':form,
    }
    return render(request,'classroom_settings.html',params)
async def remove(ctx, name):
    """Remove a participant from the current interview practice."""
    # Retrieve the current server
    server = get_server(ctx.guild.id)
    # Drop the participant from the event on the server
    server.leave_event(name)
    # Send the confirmation message
    await ctx.send(name + " has been removed from the interview practice.")
    return
def notna(obj: pandas.core.indexes.numeric.Int64Index):
    """
    Stub recording observed API usage (no implementation).

    usage.dask: 2
    """
    ...
def evaluate(flight_input, flight_target):
    """Evaluate between G1000 and phone GPS data.

    Compares altitude, latitude and longitude via absolute differences.

    flight_input : flight.FlightPruneData
        phone data
    flight_target : flight.FlightPruneData
        g1000 data
    """
    # Identifiers of flights that carried a Stratux unit (kept for the
    # optional pruning below, currently disabled).
    STRATUX_LIST = (
        '020918_00', '020918_01', '021318_00', '021318_01', '021318_02',
        '021418_00', '022718_00', '030318_00', '030318_01', '030318_02',
        '030318_03', '030418_00', '030418_01', '032618_00', '040718_00',
        '040718_01', '040718_02', '041018_00', '042018_00', '042018_01',
        '042018_02', '042718_00', '042718_01', '042718_02',
    )
    # Clone so the evaluation never mutates the caller's data.
    flight_std = flight_target.clone()
    flight_obv = flight_input.clone()
    # flight_std.prune_identifier(remain_identifier=STRATUX_LIST)
    # flight_obv.prune_identifier(remain_identifier=STRATUX_LIST)
    evaluate_abs_diff(
        standard=flight_std, std_key='alt',
        observe=flight_obv , obv_key='alt')
    evaluate_abs_diff(
        standard=flight_std, std_key='lat',
        observe=flight_obv , obv_key='lat')
    evaluate_abs_diff(
        standard=flight_std, std_key='long',
        observe=flight_obv , obv_key='long')
def concat_hists(hist_array: np.array):
    """Concatenate multiple histograms in an array by adding them up with error prop.

    NOTE(review): the accumulation starts from ``hist_array[0]`` itself, so if
    ``addhist`` mutates its receiver the first histogram in the input is
    modified in place -- confirm callers do not rely on it staying intact.
    """
    hist_final = hist_array[0]
    for hist in hist_array[1:]:
        hist_final.addhist(hist)
    return hist_final
def renorm_flux_lightcurve(flux, fluxerr, mu):
    """Normalise flux light curves with the distance modulus *mu*."""
    # Distance implied by the distance modulus: d = 10^(mu/5 + 1).
    d_squared = (10 ** (mu / 5 + 1)) ** 2
    norm = 1e18  # fixed normalisation keeping the numbers manageable
    # print('d**2', d_squared / norm)
    return flux * d_squared / norm, fluxerr * d_squared / norm
def retrieve(args: argparse.Namespace, file_handler: DataFilesHandler, homepath: Path) -> str:
    """Find an expression by name.

    Looks locally first; falls back to the configured online lookup location
    when allowed, optionally saving the fetched pattern. Exits the process
    (status 1) when the pattern cannot be found.
    """
    name = NAME_MULTIPLEXOR.join(args.REGULAR_EXPRESSION_NAME)
    try:
        return file_handler.get_pattern(name)
    except KeyError:
        pass
    # Local miss: bail out unless an online lookup is permitted.
    if args.local or (not file_handler.config.should_lookup and not args.online):
        print(f"{name} not found locally, lookups disabled.", file=sys.stderr)
        sys.exit(1)
    external_patterns = (
        PoolManager().request("GET", file_handler.config.lookup_location).data.decode("utf-8")
    )
    try:
        pattern = DataFilesHandler(StringIO(), StringIO(external_patterns)).get_pattern(name)
    except KeyError:
        print(
            f"{name} not found at {file_handler.config.lookup_location} or locally.",
            file=sys.stderr,
        )
        sys.exit(1)
    # Persist the fetched pattern when saving is requested or configured.
    if args.save or (file_handler.config.should_save and not args.no_save):
        file_handler.set_pattern(name, pattern)
        file_handler.flush(str(homepath), patterns=True)
    return pattern
def find_focus(stack):
    """
    Locate the focal plane of an image stack via the Brenner gradient.

    Parameters
    ----------
    stack: (nd-array) Image stack of dimension (Z, ...) to find focus

    Returns
    -------
    (focus_idx_max, focus_idx_min): indices of the slices with the highest
    and lowest focus scores, in that order.
    """
    def _brenner(image):
        assert len(image.shape) == 2, 'Input image must be 2D'
        return np.mean((image[:-2, :] - image[2:, :]) ** 2)

    scores = [_brenner(plane) for plane in stack]
    # argmax/argmin return the first occurrence, matching np.where(...)[0][0].
    return np.argmax(scores), np.argmin(scores)
def get_amr_line(input_f):
    """Read the amr file. AMRs are separated by a blank line.

    Returns one AMR as a single concatenated string (alignment markers of the
    form ``~e.N`` are stripped); returns '' at end of input.
    """
    cur_amr=[]
    has_content=False
    for line in input_f:
        # A fresh "(" at column 0 starts a new AMR; drop any partial buffer.
        if line[0]=="(" and len(cur_amr)!=0:
            cur_amr=[]
        if line.strip()=="":
            # Blank line terminates the current AMR (leading blanks are skipped).
            if not has_content:
                continue
            else:
                break
        elif line.strip().startswith("#"):
            # omit the comment in the AMR file
            continue
        else:
            has_content=True
            cur_amr.append(delete_pattern(line.strip(), '~e\.[0-9]+(,[0-9]+)*'))
            #cur_amr.append(line.strip())
    return "".join(cur_amr)
def getHwAddrForIp(ip):
    """
    Returns the MAC address for the first interface that matches the given IP.
    Returns None if not found.
    """
    for i in netifaces.interfaces():
        addrs = netifaces.ifaddresses(i)
        try:
            if_mac = addrs[netifaces.AF_LINK][0]['addr']
            if_ip = addrs[netifaces.AF_INET][0]['addr']
        except (IndexError, KeyError):
            # Bug fix: "except IndexError, KeyError" (Py2 syntax) bound the
            # IndexError to the name KeyError and never caught a KeyError at
            # all; a tuple catches both and is valid on Python 2 and 3.
            if_mac = if_ip = None  # Ignore ifaces that dont have MAC or IP
        if if_ip == ip:
            return if_mac
    return None
def main():
    """Console script for {{cookiecutter.project_slug}}."""
    # Parse command-line arguments; generated projects extend from here.
    args = _parse_args()
def rbf_multiquadric(r, epsilon=1.0, beta=2.5):
    """
    Multiquadric radial basis function: sqrt((epsilon*r)**2 + 1).

    NOTE(review): the ``beta`` parameter is accepted but never used -- either
    dead weight or a missing exponent; confirm intent before changing it.
    """
    return np.sqrt((epsilon*r)**2 + 1.0)
def is_valid_sudoku(board):
    """
    Checks if an input sudoku board is valid.

    A board is valid when no digit repeats within any row, column, or 3x3
    sub-square; "." marks an empty cell and is ignored.
    """
    seen_rows = [set() for _ in range(9)]
    seen_cols = [set() for _ in range(9)]
    seen_boxes = [set() for _ in range(9)]
    for r in range(9):
        for c in range(9):
            digit = board[r][c]
            if digit == ".":
                continue
            # Flatten the (row-band, col-band) pair into a single box index.
            box = (r // 3) * 3 + c // 3
            if (digit in seen_rows[r] or digit in seen_cols[c]
                    or digit in seen_boxes[box]):
                return False
            seen_rows[r].add(digit)
            seen_cols[c].add(digit)
            seen_boxes[box].add(digit)
    return True
def lab_to_nwb_dict(lab_key):
    """
    Generate a dictionary containing all relevant lab and institution info.

    :param lab_key: Key specifying one entry in element_lab.lab.Lab
    :return: dictionary with NWB parameters ('institution' and 'lab'; values
        may be None when the fetched record lacks those fields)
    """
    lab_info = (lab.Lab & lab_key).fetch1()
    return dict(
        institution=lab_info.get("institution"),
        lab=lab_info.get("lab_name"),
    )
def test__is_not_template_or_throws__throws_when_domain_doesnt_exists():
    """Test that it throws when the domain doesn't exist at all."""
    # Force the existence check to raise, and assert the error propagates.
    with patch("qsm.dom0.exists_or_throws", side_effect=lib.QsmDomainDoesntExistError, autospec=True):
        with pytest.raises(lib.QsmDomainDoesntExistError):
            dom0.is_not_template_or_throws("test-template", must_exist=True)
def length(self: Set[A]) -> int:
    """
    Returns the number of elements in the set. ``size`` is an alias for length.

    Returns:
        The length of the set
    """
    # Delegate straight to the built-in length protocol.
    return len(self)
def timestamp(tdigits=8):
    """Return a unique timestamp string for the session. useful for ensuring
    unique function identifiers, etc.

    The digits of a high-resolution clock reading with '.' and '-' removed,
    truncated to at most ``tdigits + 1`` characters.
    """
    # Bug fix: time.clock() was removed in Python 3.8. Fall back to
    # perf_counter(), the documented replacement, while still using clock()
    # on interpreters that have it (identical behavior there).
    now = getattr(time, "clock", time.perf_counter)()
    return str(now).replace(".", "").replace("-", "")[: tdigits + 1]
def hierarchical_mutation(original_individual: Individual, strength: float, **kwargs) -> List[Optional[Individual]]:
    # TODO: Double Check
    """Choose a node in the graph_manager, choose a parameter inside the node, mutate it.
    Each parameter has probability: `1/len(nodes) * 1/len(parameters in that node)`.
    Args:
        original_individual (Individual): source individual to mutate
        strength (float): mutation strength in (0, 1]; higher values make
            additional mutation rounds more likely
    Returns:
        A list with the new mutated individual or None if it is not valid
    """
    check_muation_parameters(original_individual, strength)
    new_individual = clone_individual(original_individual)
    new_individual.parents = {original_individual}
    new_individual.operator = hierarchical_mutation
    # Do while not (rnd.random() < strength)
    while True:
        # Use "while loop" to try choosing a node that doesn't contain only the Information parameter or the mutation
        # had no effect
        while True:
            # Choose a node that contains the parameter to mutate
            chosen_node = random_generator.choice(new_individual.nodes())
            # Create a list of parameters contained into the macro
            candidate_parameters = list()
            for parameter_name, parameter in new_individual.nodes[chosen_node]['parameters'].items():
                if not isinstance(parameter, Information):
                    candidate_parameters.append(parameter)
            # If I tried to mutate a macro that contains only an Information parameter -> pick another node to mutate
            # else -> mutate a random parameter
            if candidate_parameters:
                # Choose only one parameter to mutate in the list of all parameters of the chosen macro
                chosen_parameter = random_generator.choice(candidate_parameters)
                assert strength
                chosen_parameter.mutate(strength)
                break
        # Stop condition: strength == 1.0 always stops after one round
        if strength == 1.0 or not (random_generator.random() < strength):
            break
    new_individual.finalize()
    if not new_individual.valid:
        return [None]
    else:
        # print_individual(original_individual, 'ORIGINAL', True)
        # print_individual(individual, 'MUTATED', True)
        return [new_individual]
def is_wrapped_exposed_object(obj):
    """
    Return True if ``obj`` is a Lua (lupa) wrapper for a BaseExposedObject
    instance.
    """
    probe = getattr(obj, 'is_object', None)
    # Missing or non-callable attribute means it is not a wrapper.
    if not callable(probe):
        return False
    return bool(probe())
def no_transform(image):
    """Pass through the original image without transformation.

    The second tuple element is always None so callers that inspect the
    applied transform keep working.
    """
    return image, None
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
    """Builds a menu by arranging buttons into rows of n_cols columns.

    Optional header/footer entries occupy their own single-item rows.

    :return:
        list of button rows
    """
    rows = [buttons[start:start + n_cols]
            for start in range(0, len(buttons), n_cols)]
    if header_buttons:
        rows = [[header_buttons]] + rows
    if footer_buttons:
        rows = rows + [[footer_buttons]]
    return rows
def askAgentAndExecLocalCode(agent, method, **k):
    """Apply *method* to *agent* (with parameters **k), then run the local code.

    agent, method, dict. of the parameters (may be empty). In debug mode
    exceptions propagate; otherwise SystemExit aborts the run and any other
    failure is reported and skipped.
    """
    setLocalCode("")
    if common.debug:
        applyMethod(agent, method, **k)
    else:
        try:
            applyMethod(agent, method, **k)
        except SystemExit:
            # Bug fix: the error paths referenced an undefined name ``a``;
            # report on the actual ``agent`` argument instead.
            print('method', method, 'raising an exit condition while ' +
                  'acting on agent number', agent.number, 'of type ', agent.agType)
            os.sys.exit(1)
        except BaseException:
            print('cannot apply (case 3) method', method, 'to agent number',
                  agent.number)
            pass
    execLocalCode()
def dbbox2result(dbboxes, labels, num_classes):
    """
    Convert detection results to a list of numpy arrays.
    :param dbboxes: (Tensor): shape (n, 9)
    :param labels: (Tensor): shape (n, )
    :param num_classes: (int), class number, including background class
    :return: list (ndarray): dbbox results of each class; when no detections
        exist, each class gets a single all-zero (1, 9) placeholder row
    """
    # TODO: merge it with bbox2result
    if dbboxes.shape[0] == 0:
        return [
            np.zeros((1, 9), dtype=np.float32) for i in range(num_classes - 1)
        ]
    else:
        # Move to CPU once, then slice per foreground class.
        dbboxes = dbboxes.cpu().numpy()
        labels = labels.cpu().numpy()
        return [dbboxes[labels == i, :] for i in range(num_classes - 1)]
        # result = [dbboxes[labels == i, :] for i in range(num_classes - 1)]
        # for i, each_class in enumerate(result):
        #     if each_class.shape[0] == 0:
        #         result[i] = np.zeros((1, 9))
        # return result
def SSderivative(ds):
    """
    Given a time-step ds, and an single input time history u, this SS model
    returns the output y=[u,du/ds], where du/dt is computed with second order
    accuracy.
    """
    # Backward-difference coefficients 0.5/ds, -2/ds, 1.5/ds for u(t-1),
    # u(t), u(t+1).
    A = np.array([[0]])
    # NOTE(review): Bm1 is 1-D while B0/B1 are 2-D -- presumably SSconv
    # accepts both shapes; confirm before normalising.
    Bm1 = np.array([0.5 / ds])
    B0 = np.array([[-2 / ds]])
    B1 = np.array([[1.5 / ds]])
    C = np.array([[0], [1]])
    D = np.array([[1], [0]])
    # change state
    Aout, Bout, Cout, Dout = SSconv(A, B0, B1, C, D, Bm1)
    return Aout, Bout, Cout, Dout
def sigterm_handler(_signo, _stack_frame):
    """Clean exit on SIGTERM signal (when systemd stops the process)."""
    raise SystemExit(0)
def clean_post(value):
    """Remove unwanted elements in post content.

    Parses the HTML fragment, normalises the root tag to <div>, strips its
    attributes and drops comment-owner blocks, then serialises back to HTML.
    """
    doc = lxml.html.fragment_fromstring(value)
    doc.tag = 'div'  # replaces <li>
    doc.attrib.clear()
    # remove comment owner info
    for e in doc.xpath('//div[@class="weblog_keywords"]'):
        e.drop_tree()
    return lxml.html.tostring(doc)
def test_get_headers(test_app):
    """
    Checks if the default header values are correct
    and if they are extended (not replaced) by extra headers passed in.
    """
    r = Requester('url', 'token')
    # Default: only the bearer token header.
    res = r._get_headers()
    assert res == { 'Authorization': 'Bearer token' }
    # One extra header is merged in.
    res_with_extra = r._get_headers({ 'Content-Type': 'application/json' })
    assert res_with_extra == {
        'Authorization': 'Bearer token',
        'Content-Type': 'application/json'
    }
    # Multiple extras are merged in.
    res_with_extras = r._get_headers({
        'Content-Type': 'application/json',
        'Keep-Alive': 'timeout=5, max=1000'
    })
    assert res_with_extras == {
        'Authorization': 'Bearer token',
        'Content-Type': 'application/json',
        'Keep-Alive': 'timeout=5, max=1000'
    }
def get_houdini_version(as_string=True):
    """
    Returns version of the executed Houdini.

    :param as_string: bool, Whether to return the string version or not
    :return: variant, the version string when ``as_string`` is True,
        otherwise the raw version value from hou
    """
    if not as_string:
        return hou.applicationVersion()
    return hou.applicationVersionString()
def check_port_open(port: int) -> bool:
    """
    Return True when TCP *port* can be bound locally (i.e. it is free).

    Part of the port_validation logic.
    """
    try:
        probe = socket.socket()
        probe.bind(("", port))
        probe.close()
    except OSError:
        print(f"Порт {port} занят")
        return False
    print(f"Порт {port} свободен")
    return True
def view_all_recommended():
    """View all recommended bets.

    Prints upcoming, unresolved matches with a recommendation other than
    'Avoid', sorted by recommendation confidence (descending).
    """
    main_cur.execute("SELECT match_datetime, match_analysis.country_league_name, home_team_name, "
                     "away_team_name, rec_bet, percentage_rec "
                     "FROM match_analysis where bet_result isnull AND rec_bet <> 'Avoid' "
                     "AND match_datetime > datetime('now') ORDER BY percentage_rec DESC")
    data = main_cur.fetchall()
    for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec in data:
        print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " + rec_bet
              + " | " + str(round(percentage_rec, 2)))
def convert_date_to_tick_tick_format(datetime_obj, tz: str):
    """
    Parses ISO 8601 Format to Tick Tick Date Format
    It first converts the datetime object to UTC time based off the passed time zone, and then
    returns a string with the TickTick required date format.
    !!! info Required Format
        ISO 8601 Format Example: 2020-12-23T01:56:07+00:00
        TickTick Required Format: 2020-12-23T01:56:07+0000 -> Where the last colon is removed for timezone
    Arguments:
        datetime_obj (datetime): Datetime object to be parsed.
        tz: Time zone string.
    Returns:
        str: The TickTick accepted date string.
    ??? info "Import Help"
        ```python
        from ticktick.helpers.time_methods import convert_date_to_tick_tick_format
        ```
    ??? example
        ```python
        date = datetime(2022, 12, 31, 14, 30, 45)
        converted_date = convert_date_to_tick_tick_format(date, 'US/Pacific')
        ```
    ??? success "Result"
        The proper format for a date string to be used with TickTick dates.
        ```python
        '2022-12-31T22:30:45+0000'
        ```
    """
    # Convert from the local zone to UTC, then render as ISO 8601.
    date = convert_local_time_to_utc(datetime_obj, tz)
    date = date.replace(tzinfo=datetime.timezone.utc).isoformat()
    # Strip the last colon (the one inside the UTC offset) via a reversed replace.
    date = date[::-1].replace(":", "", 1)[::-1]
    return date
def check_path(path: str):
    """Create *path* (including parents) if it does not already exist.

    Parameters
    ----------
    path
        The string of the path to check/create
    """
    # exist_ok avoids the check-then-create race of the original
    # exists()/makedirs() pair while keeping the same end state.
    os.makedirs(path, exist_ok=True)
def measurement(resp, p):
    """model measurement effects in the filters by translating the response at
    each location and stimulus (first 3 axes of resp) toward the filterwise mean
    (4th axis) according to proportion p. p=1 means that all filters reduce
    to their respective means; p=0 does nothing; p<0 is possible but probably
    not something you want."""
    resp = tf.convert_to_tensor(resp)
    # average the filter dim
    meanresp = tf.reduce_mean(resp, axis=3, keepdims=False)
    # make resp the origin of meanresp and scale by p
    transresp = (meanresp[:, :, :, None] - resp) * p
    # linear interpolation: resp + p * (mean - resp)
    return resp + transresp
def irpf(salario, base=12.5, prorrateo=0):
    """Compute the IRPF withholding for a salary at the given rate.

    :param salario: salary amount (int or float; the original only accepted
        floats and silently returned None for integers).
    :param base: withholding percentage; defaults to 12.5.
    :param prorrateo: truthy to prorate the two extra payments over 12 months.
    :return: the withheld amount, or None when the inputs are not numeric.
    """
    if isinstance(salario, (int, float)) and isinstance(base, (int, float)):
        if prorrateo:
            # Add 2/12 to spread the two extra yearly payments across months.
            return (salario * (1 + 2 / 12)) * (base / 100)
        return salario * (base / 100)
    return None
def get2p3dSlaterCondonUop(Fdd=(9, 0, 8, 0, 6), Fpp=(20, 0, 8), Fpd=(10, 0, 8), Gpd=(0, 3, 0, 2)):
    """
    Return a 2p-3d U operator containing a sum of
    different Slater-Condon proccesses.
    Parameters
    ----------
    Fdd : tuple
        Direct d-d Slater integrals F^{0,2,4}.
    Fpp : tuple
        Direct p-p Slater integrals F^{0,2}.
    Fpd : tuple
        Direct p-d Slater integrals F^{0,2}.
    Gpd : tuple
        Exchange p-d Slater integrals G^{1,3}.
    """
    # Calculate F_dd^{0,2,4}
    FddOp = getUop(l1=2,l2=2,l3=2,l4=2,R=Fdd)
    # Calculate F_pp^{0,2}
    FppOp = getUop(l1=1,l2=1,l3=1,l4=1,R=Fpp)
    # Calculate F_pd^{0,2} (both orbital orderings contribute)
    FpdOp1 = getUop(l1=1,l2=2,l3=2,l4=1,R=Fpd)
    FpdOp2 = getUop(l1=2,l2=1,l3=1,l4=2,R=Fpd)
    FpdOp = addOps([FpdOp1,FpdOp2])
    # Calculate G_pd^{1,3} (exchange terms, both orderings)
    GpdOp1 = getUop(l1=1,l2=2,l3=1,l4=2,R=Gpd)
    GpdOp2 = getUop(l1=2,l2=1,l3=2,l4=1,R=Gpd)
    GpdOp = addOps([GpdOp1,GpdOp2])
    # Add operators
    uOp = addOps([FddOp,FppOp,FpdOp,GpdOp])
    return uOp
def read_meta_fs(filename: AnyStr):
    """
    Read pickled metadata from ``<filename>/meta.pkl`` on disk.

    The directory is created first if missing; note that opening the pickle
    still raises FileNotFoundError when meta.pkl itself does not exist.
    """
    settings.Path(filename).mkdir(parents=True, exist_ok=True)
    filepath = settings.pj(filename, "meta.pkl")
    with open(filepath, "rb") as fh:
        return pickle.load(fh)
def compile(model, ptr, vtr, num_y_per_branch=1):
    """Create a list with ground truth, loss functions and loss weights, then
    compile the model with them.

    NOTE(review): this function shadows the built-in ``compile``; renaming
    would break existing callers, so it is left as-is.

    :param model: Keras model whose outputs are grouped per branch.
    :param ptr: pose ground truth, repeated for each pose output.
    :param vtr: visibility ground truth, one per branch.
    :param num_y_per_branch: number of pose outputs preceding each
        visibility output within a branch.
    :return: the assembled ground-truth list (losses/weights are applied
        directly to the model).
    """
    yholder_tr = []
    losses = []
    loss_weights = []
    # Each branch emits num_y_per_branch pose outputs plus one visibility output.
    num_blocks = int(len(model.output) / (num_y_per_branch + 1))
    printcn(OKBLUE,
            'Compiling model with %d outputs per branch and %d branches.' %
            (num_y_per_branch, num_blocks))
    for i in range(num_blocks):
        for j in range(num_y_per_branch):
            yholder_tr.append(ptr)
            losses.append(elasticnet_loss_on_valid_joints)
            loss_weights.append(1.)
        yholder_tr.append(vtr)
        losses.append('binary_crossentropy')
        loss_weights.append(0.01)
    printcn(OKBLUE, 'loss_weights: ' + str(loss_weights))
    model.compile(loss=losses, optimizer=RMSprop(), loss_weights=loss_weights)
    return yholder_tr
def strip_clean(input_text):
    """Strip out undesired tags.
    This removes tags like <script>, but leaves characters like & unescaped.
    The goal is to store the raw text in the database with the XSS nastiness.
    By doing this, the content in the database is raw
    and Django can continue to assume that it's unsafe by default.
    """
    # bleach.clean escapes entities; html.unescape restores the raw characters.
    return html.unescape(bleach.clean(input_text, strip=True))
def main(configs, project, bucket_path):
    """Loads metric config files and runs each metric.

    :param configs: iterable of config paths (falls back to all_configs()).
    :param project: BigQuery project to bill the queries to.
    :param bucket_path: GCS destination for the metric outputs.
    Exits with status 1 when any config fails.
    """
    if not project:
        raise ValueError('project', project)
    if not bucket_path:
        raise ValueError('bucket_path', bucket_path)
    # Optional InfluxDB pusher, configured via the environment.
    if 'VELODROME_INFLUXDB_CONFIG' in os.environ:
        influx = influxdb.Pusher.from_config(os.environ['VELODROME_INFLUXDB_CONFIG'])
    else:
        influx = None
    # the 'bq show' command is called as a hack to dodge the config prompts that bq presents
    # the first time it is run. A newline is passed to stdin to skip the prompt for default project
    # when the service account in use has access to multiple projects.
    check(['bq', 'show'], stdin='\n')
    errs = []
    for path in configs or all_configs():
        try:
            with open(path) as config_raw:
                config = yaml.safe_load(config_raw)
            if not config:
                raise ValueError('invalid yaml: %s.' % path)
            metric = config['metric'].strip()
            validate_metric_name(metric)
            run_metric(
                project,
                bucket_path,
                metric,
                config['query'],
                config['jqfilter'],
                config.get('jqmeasurements'),
                influx,
            )
        except (
                ValueError,
                KeyError,
                IOError,
                subprocess.CalledProcessError,
                influxdb.Error,
        ):
            # Record the failure and keep processing the remaining configs.
            print >>sys.stderr, traceback.format_exc()
            errs.append(path)
    if errs:
        print 'Failed %d configs: %s' % (len(errs), ', '.join(errs))
        sys.exit(1)
def setup(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,
          draw_probability=DRAW_PROBABILITY, backend=None, env=None):
    """Setups the global environment.
    :param env: the specific :class:`TrueSkill` object to be the global
                environment. It is optional; when omitted a new TrueSkill
                environment is built from the remaining parameters.
    >>> Rating()
    trueskill.Rating(mu=25.000, sigma=8.333)
    >>> setup(mu=50) #doctest: +ELLIPSIS
    trueskill.TrueSkill(mu=50.000, ...)
    >>> Rating()
    trueskill.Rating(mu=50.000, sigma=8.333)
    """
    if env is None:
        env = TrueSkill(mu, sigma, beta, tau, draw_probability, backend)
    # Install the environment globally so module-level helpers pick it up.
    global_env.__trueskill__ = env
    return env
def _bytepad(x, length):
    """Zero pad byte string as defined in NIST SP 800-185."""
    prefixed = _left_encode(length) + x
    # Note: this implementation works with byte aligned strings,
    # hence no additional bit padding is needed at this point.
    pad_len = -len(prefixed) % length  # bytes needed to reach a multiple of length
    return prefixed + b'\x00' * pad_len
def print_profile(command, max_ratio=1.0, filter_text=None):
    """Profile *command* with cProfile and log the aggregated statistics.

    :param command: code string executed via :meth:`cProfile.Profile.run`,
        e.g. ``"test()"``
    :param float max_ratio: fraction of entries to display (< 1.0)
    :param filter_text: optional regex restricting which entries are shown
    :rtype: None
    """
    logger = getLogger()
    prof = cProfile.Profile()
    # Measure the command's execution profile.
    prof = prof.run(command)
    # pstats.Stats writes text to its stream, so a str sink is required;
    # io.BytesIO would raise TypeError on Python 3.
    stream = io.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    # Sort entries by internal (self) time.
    stats.sort_stats(u"time")
    # Print entries up to the display limit, optionally filtered first.
    if filter_text is None:
        stats.print_stats(max_ratio)
    else:
        stats.print_stats(filter_text, max_ratio)
    # Emit the collected report through the logger.
    logger.info("Profile Result:\n%s", stream.getvalue())
    return
def str2bool(val):
    """Coerce a bool or a 'true'/'false' string to bool (for argparse).

    Raises argparse.ArgumentTypeError on any other string.
    """
    # https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    if isinstance(val, bool):
        return val
    lowered = val.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise argparse.ArgumentTypeError('Boolean value expected')
def get_scalefactor(metadata):
    """Derive XY and ZX scaling factors from CZI or OME-TIFF metadata.

    :param metadata: dictionary with CZI or OME-TIFF metadata
    :type metadata: dict
    :return: dictionary with keys 'xy' and 'zx' holding the scaling factors
    :rtype: dict
    """
    # Default both factors to 1.0 so missing keys still yield a usable result.
    factors = {'xy': 1.0, 'zx': 1.0}
    try:
        y_scale = metadata['YScale']
        # Ratio between X and Y scaling.
        factors['xy'] = metadata['XScale'] / y_scale
        # Ratio between Z and Y scaling.
        factors['zx'] = metadata['ZScale'] / y_scale
    except KeyError as e:
        print('Key not found: ', e)
    return factors
def alignmentEntropy(align, statistic='absolute', removeGaps=False, k=1, logFunc=np.log):
    """Calculates the entropy of each site (or kmer) in a sequence alignment.

    Also can compute:
    - "uniqueness" which is the fraction of unique sequences
    - "uniquenum" which is the number of unique kmers at each position

    Parameters
    ----------
    align : pd.Series() or list
        Alignment of sequences.
    statistic : str
        Statistic to be computed: absolute, uniqueness, uniquenum.
        Uniqueness is the fraction of unique sequences.
        Uniquenum is the number of unique AA at each position.
        NOTE(review): an unrecognized statistic silently returns zeros.
    removeGaps : bool
        Remove from the alignment at each position, kmers that start with a
        gap character. Also use "non-gapped kmers" (ie skipping gaps).
    k : int
        Length of the kmer to consider at each start position in the alignment.
        (default 1 specifies site-wise entropy)
    logFunc : function
        NOTE(review): this parameter is currently unused; the entropy base is
        whatever objhist's entropy() uses — confirm before relying on it.

    Returns
    -------
    entropy : np.ndarray of float, length L - k + 1
        One value per kmer start position."""
    # grabKmer presumably returns a (full, non-gapped) pair; index 1 selects
    # the non-gapped variant when gaps are being removed — TODO confirm.
    if removeGaps:
        grabKmerFlag = 1
    else:
        grabKmerFlag = 0
    align = padAlignment(align)
    # Alignment length taken from the first sequence (all padded to equal length).
    L = len(align[align.index[0]])
    nKmers = L - k + 1
    entropy = np.zeros(nKmers, dtype=float)
    for aai in np.arange(nKmers):
        kmers = [grabKmer(seq, aai, k)[grabKmerFlag] for seq in align]
        """kmers that start with a gap or that are at the end and are of insufficent length, will be None"""
        kmers = [mer for mer in kmers if not mer is None]
        # objhist builds a frequency histogram over the observed kmers.
        oh = objhist(kmers)
        if statistic == 'absolute':
            entropy[aai] = oh.entropy()
        elif statistic == 'uniqueness':
            entropy[aai] = oh.uniqueness()
        elif statistic == 'uniquenum':
            entropy[aai] = len(list(oh.keys()))
    return entropy
def NodeToString(xml_node):
    """Serializes an XML DOM node back to markup.

    Args:
        xml_node: xml.dom.Node object
    Returns:
        String containing XML
    """
    serialized = xml_node.toxml()
    return serialized
def difference(data, interval):
    """Difference a dataset at a fixed lag.

    parameters:
        data: dataset to be differenced
        interval: the lag between the two elements to be differenced
    return:
        list of length len(data) - interval (empty if interval >= len(data))
    """
    # Pair each element with the one `interval` positions later; zip stops
    # at the shorter sequence, which handles short inputs naturally.
    return [later - earlier for earlier, later in zip(data, data[interval:])]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.