content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def histogram(name, tensor, max_bins):
    """Build a Summary protobuf containing a histogram of *tensor*.

    The tensor is converted to a numpy array, binned with
    ``make_histogram``, and wrapped in a ``HistogramProto`` tagged for
    the 'histograms' TensorBoard plugin.
    """
    values = make_np(tensor)
    sum_sq, bucket_limit, bucket = make_histogram(values.astype(float), max_bins)
    flat_count = len(values.reshape(-1))
    proto = HistogramProto(
        min=values.min(),
        max=values.max(),
        num=flat_count,
        sum=values.sum(),
        sum_squares=sum_sq,
        bucket_limit=bucket_limit,
        bucket=bucket,
    )
    plugin = SummaryMetadata.PluginData(plugin_name='histograms')
    meta = SummaryMetadata(plugin_data=plugin)
    entry = Summary.Value(tag=name, histo=proto, metadata=meta)
    return Summary(value=[entry])
def max_pool_1d(input_tensor, pool_sizes=(2), stride_sizes=(1), paddings='same', names=None):
    """Apply 1-D max pooling to a tensor.

    Arguments:
        input_tensor: A Tensor (float, double, int32, int64, uint8, int16
            or int8) holding the pre-layer values.

    Keyword Arguments:
        pool_sizes: Size of the pooling kernel (default: 2).
        stride_sizes: Stride of the kernel (default: 1).
        paddings (str): Padding algorithm, 'same' or 'valid' in any casing;
            anything unrecognised falls back to 'SAME' (default: 'same').
        names (str): Name of the layer (default: 'max_pool_1d').

    Returns:
        Tensor: The max-pooled layer.
    """
    # Default the layer name instead of leaving it undefined / unused.
    names = str(names) if names is not None else 'max_pool_1d'
    # Normalise padding: any casing of 'valid' maps to VALID, everything
    # else (including typos) falls back to SAME, as before.
    paddings = 'VALID' if str(paddings).upper() == 'VALID' else 'SAME'
    # Bug fix: the computed name is now forwarded to TensorFlow; the
    # original passed name=None, silently discarding `names`.
    return tf.nn.max_pool1d(input=input_tensor, ksize=pool_sizes,
                            strides=stride_sizes, padding=paddings,
                            name=names)
import base64
import gzip
import json
def decompress_metadata_string_to_dict(input_string):  # pylint: disable=invalid-name
    """
    Convert the compact string format (dumped, gzipped, base64 encoded)
    returned in IonQ API metadata back into the dict used when building
    the results object of a returned job.

    Parameters:
        input_string (str): compressed string format of the metadata dict

    Returns:
        dict: the decompressed metadata dict, or None for None input
    """
    if input_string is None:
        return None
    # Reverse the encoding pipeline: base64 -> gzip -> JSON.
    compressed = base64.b64decode(input_string.encode())
    return json.loads(gzip.decompress(compressed))
import subprocess
import re
import tempfile
import os
def call_ck(i):
    """Invoke the CK (Collective Knowledge) command-line tool with dict *i*.

    The input dict is saved to a temporary JSON file and passed to the
    ``ck`` CLI.  If ``detach_console`` is 'yes', the command is launched in
    a detached console (Windows: CREATE_NEW_CONSOLE; POSIX: double-fork
    daemonization) and the captured output is a placeholder message.

    Input:  {
              action           - CK action name (alphanumeric, '-', '_' only)
              (detach_console) - 'yes' to run in a detached console
              ... any other CK input keys
            }

    Output: {
              return   - return code = 0 if successful, > 0 if error
              (error)  - error text if return > 0
              (stdout) - stdout, if available
              (stderr) - stderr, if available
              (std)    - stdout + stderr
            }

    NOTE(review): relies on module-level `ck` (CK kernel module) which is
    imported elsewhere in this file.
    """
    # Check action
    action = i.get('action', '')
    if action == '':
        return {'return': 1, 'error': 'action is not defined'}
    # Check that no special characters, otherwise can run any command from CMD
    if not re.match('^[A-Za-z0-9-_]*$', action):
        return {'return': 1, 'error': 'action contains illegal characters'}
    # Generate tmp file
    # suffix is important - CK will delete such file!
    fd, fn = tempfile.mkstemp(suffix='.tmp', prefix='ck-')
    os.close(fd)
    dc = i.get('detach_console', '')
    if dc == 'yes':
        i['out'] = 'con'  # If detach, output as console
    # Prepare dummy output
    rr = {'return': 0}
    rr['stdout'] = ''
    rr['stderr'] = ''
    # Save json to temporary file
    rx = ck.save_json_to_file({'json_file': fn, 'dict': i})
    if rx['return'] > 0:
        return rx
    # Prepare command line: the '@file' syntax makes CK read its input
    # dict from the JSON file written above.
    cmd = 'ck '+action+' @'+fn
    if dc == 'yes':
        # Check platform
        rx = ck.get_os_ck({})
        if rx['return'] > 0:
            return rx
        plat = rx['platform']
        # Platform-specific detached-console command template from the
        # CK kernel configuration.
        dci = ck.cfg.get('detached_console', {}).get(plat, {})
        dcmd = dci.get('cmd', '')
        if dcmd == '':
            return {'return': 1, 'error': 'detached console is requested but cmd is not defined in kernel configuration'}
        dcmd = dcmd.replace('$#cmd#$', cmd)
        if dci.get('use_create_new_console_flag', '') == 'yes':
            # Windows-style detachment via CREATE_NEW_CONSOLE.
            process = subprocess.Popen(dcmd, stdin=None, stdout=None, stderr=None,
                                       shell=True, close_fds=True, creationflags=subprocess.CREATE_NEW_CONSOLE)
        else:
            # Will need to do the forking (classic POSIX double-fork so the
            # detached process is re-parented to init and has no tty).
            try:
                pid = os.fork()
            except OSError as e:
                return {'return': 1, 'error': 'forking detached console failed ('+format(e)+')'}
            if pid == 0:
                os.setsid()
                pid = os.fork()
                if pid != 0:
                    # First child exits immediately; grandchild carries on.
                    os._exit(0)
                # Close every inherited file descriptor.
                try:
                    maxfd = os.sysconf("SC_OPEN_MAX")
                except (AttributeError, ValueError):
                    maxfd = 1024
                for fd in range(maxfd):
                    try:
                        os.close(fd)
                    except OSError:
                        pass
                # Re-point stdin/stdout/stderr at /dev/null.
                os.open('/dev/null', os.O_RDWR)
                os.dup2(0, 1)
                os.dup2(0, 2)
                # Normally child process
                process = os.system(dcmd)
                os._exit(0)
        # Parent process: report a placeholder instead of real output.
        stdout = ck.cfg.get('detached_console_html',
                            'Console was detached ...')
        stderr = ''
    else:
        # Attached run: capture stdout/stderr directly.
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = process.communicate()
        # Best-effort decode; undecodable bytes are left as-is.
        try:
            stdout = stdout.decode('utf8')
        except Exception as e:
            pass
        try:
            stderr = stderr.decode('utf8')
        except Exception as e:
            pass
    rr['std'] = stdout+stderr
    rr['stdout'] = stdout
    rr['stderr'] = stderr
    return rr
import struct
import socket
def inet_atoni(ip):
    """Like inet_aton() but returns an integer."""
    packed = socket.inet_aton(ip)
    (as_int,) = struct.unpack('>I', packed)
    return as_int
from typing import Optional
import warnings
def align_method_FRAME(
    left, right, axis, flex: Optional[bool] = False, level: Level = None
):
    """
    Convert rhs to meet lhs dims if input is list, tuple or np.ndarray.

    Parameters
    ----------
    left : DataFrame
    right : Any
    axis: int, str, or None
    flex: bool or None, default False
        Whether this is a flex op, in which case we reindex.
        None indicates not to check for alignment.
    level : int or level name, default None

    Returns
    -------
    left : DataFrame
    right : Any

    NOTE(review): this is pandas-internal code; `np`, `is_list_like`,
    `is_array_like`, `ABCSeries`, `ABCDataFrame`, `Level` and
    `_maybe_align_series_as_frame` come from pandas imports elsewhere
    in this file.
    """

    def to_series(right):
        # Coerce a 1-D list-like into a Series aligned with either the
        # index or the columns of `left`, depending on `axis`.
        msg = "Unable to coerce to Series, length must be {req_len}: given {given_len}"
        if axis is not None and left._get_axis_name(axis) == "index":
            if len(left.index) != len(right):
                raise ValueError(
                    msg.format(req_len=len(left.index), given_len=len(right))
                )
            right = left._constructor_sliced(right, index=left.index)
        else:
            if len(left.columns) != len(right):
                raise ValueError(
                    msg.format(req_len=len(left.columns), given_len=len(right))
                )
            right = left._constructor_sliced(right, index=left.columns)
        return right

    if isinstance(right, np.ndarray):
        if right.ndim == 1:
            right = to_series(right)
        elif right.ndim == 2:
            if right.shape == left.shape:
                # Exact shape match: wrap as a DataFrame with left's labels.
                right = left._constructor(right, index=left.index, columns=left.columns)
            elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
                # Broadcast across columns
                right = np.broadcast_to(right, left.shape)
                right = left._constructor(right, index=left.index, columns=left.columns)
            elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
                # Broadcast along rows
                right = to_series(right[0, :])
            else:
                raise ValueError(
                    "Unable to coerce to DataFrame, shape "
                    f"must be {left.shape}: given {right.shape}"
                )
        elif right.ndim > 2:
            raise ValueError(
                "Unable to coerce to Series/DataFrame, "
                f"dimension must be <= 2: {right.shape}"
            )
    elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)):
        # GH 36702. Raise when attempting arithmetic with list of array-like.
        if any(is_array_like(el) for el in right):
            raise ValueError(
                f"Unable to coerce list of {type(right[0])} to Series/DataFrame"
            )
        # GH17901
        right = to_series(right)

    if flex is not None and isinstance(right, ABCDataFrame):
        if not left._indexed_same(right):
            if flex:
                left, right = left.align(right, join="outer", level=level, copy=False)
            else:
                raise ValueError(
                    "Can only compare identically-labeled DataFrame objects"
                )
    elif isinstance(right, ABCSeries):
        # axis=1 is default for DataFrame-with-Series op
        axis = left._get_axis_number(axis) if axis is not None else 1
        if not flex:
            # Non-flex (comparison) ops warn before silently reindexing.
            if not left.axes[axis].equals(right.index):
                warnings.warn(
                    "Automatic reindexing on DataFrame vs Series comparisons "
                    "is deprecated and will raise ValueError in a future version. "
                    "Do `left, right = left.align(right, axis=1, copy=False)` "
                    "before e.g. `left == right`",
                    FutureWarning,
                    stacklevel=5,
                )
        left, right = left.align(
            right, join="outer", axis=axis, level=level, copy=False
        )
        right = _maybe_align_series_as_frame(left, right, axis)
    return left, right
import re
def rep_unicode_in_code(code):
    """Replace JavaScript-style unicode escapes in *code* with literal characters.

    For example ``'\\u003D'`` becomes ``'='``.

    :param code: source text, possibly containing ``\\uXXXX`` escapes (str)
    :return: text with every valid escape replaced (str)
    """
    # Bug fix: match only hex digits. The old class [0-9a-zA-Z] also
    # matched sequences like '\uzzzz' and then crashed in int(..., 16);
    # such sequences are now simply left unchanged.
    pattern = re.compile(r'(\\u[0-9a-fA-F]{4})')
    for item in set(pattern.findall(code)):
        # item[2:] strips the leading '\u' prefix
        code = code.replace(item, chr(int(item[2:], 16)))
    return code
def delete_bucket_on_project(current_session, project_name, bucket_name):
    """
    Remove a bucket from a project, both on the userdatamodel
    and on the storage associated with that bucket.

    Returns a dictionary with a "result" key: "success" when the
    data-model deletion succeeded, otherwise the error message reported
    by the data-model layer.
    """
    response = pj.delete_bucket_on_project(current_session, project_name, bucket_name)
    # The storage bucket is deleted regardless of the data-model outcome;
    # the original code issued this identical call on both branches, so
    # the if/else has been collapsed.
    current_app.storage_manager.delete_bucket(
        response["provider"].name, bucket_name
    )
    # On success response["result"] == "success", so this covers both cases.
    return {"result": response["result"]}
def redir(url, text=None, target='_blank'):
    """Links to a redirect page
    """
    anchor = '<a href="%(url)s" target="%(target)s">%(text)s</a>' % {
        'url': redir_url(url),
        'text': text if text else url,
        'target': target,
    }
    return mark_safe(anchor)
def notifier_limit_over_checker(username, language_code, hard_limit):
    """
    Function that takes a username and checks if they're above a hard monthly limit. This is currently UNUSED.
    True if they're over the limit, False otherwise.

    :param username: The username of the person.
    :param language_code: The language code to check the notification count for.
    :param hard_limit: The hard monthly limit for notifications per user per language. Once a user exceeds
                       this amount they no longer get notifications for this language. This is an integer.
    :return: True if they are over the limit, False otherwise.
    """
    # Local import: only needed to parse the stored dict literal below.
    from ast import literal_eval

    # Fetch the per-user monthly notification record.
    sql_lw = "SELECT * FROM notify_monthly_limit WHERE username = ?"
    cursor_main.execute(sql_lw, (username,))
    user_data = cursor_main.fetchone()
    if user_data is None:  # No record of this user, so it's okay to send.
        return False
    # Security fix: parse the stored dict literal with ast.literal_eval
    # instead of eval(), which would execute arbitrary code from the DB.
    monthly_limit_dictionary = literal_eval(user_data[1])
    # Missing language entry means zero notifications so far.
    current_number_notifications = monthly_limit_dictionary.get(language_code, 0)
    return current_number_notifications > hard_limit
def lateral_steering_transform(steering, camera_position):
    """
    Transform the steering of the lateral cameras (left, right).

    Parameters:
        steering (numpy.float): Original steering
        camera_position: LEFT or RIGHT camera identifier
    Returns:
        out (numpy.float): New steering
    """
    correction = 0
    if camera_position == LEFT:
        correction = STEERING_CORRECTION
    elif camera_position == RIGHT:
        correction = -STEERING_CORRECTION
    return steering + correction
def fv_creator(fp, df, F, int_fwm):
    """
    Creates a frequency grid such that the estimated MI-FWM bands
    will be on the grid, and extends it so as to avoid FFT boundary
    problems.

    Inputs::
        fp: pump frequency; the three band centres are fp - F, fp, fp + F (float)
        df: NOTE(review) -- this parameter is shadowed below (the step is
            re-derived from the grid itself); confirm it is intentionally unused
        F: band separation frequency (float)
        int_fwm: object holding nt, the total number of grid points (int)
    Output::
        fv: frequency vector covering the bands (array of shape [nt])
        p_pos: index array of the grid point(s) closest to the pump
        f_centrals: the three central band frequencies (list of float)
    """
    # Three central frequencies: fp - F, fp, fp + F.
    f_centrals = [fp + i * F for i in range(-1, 2)]
    # First band segment; endpoint excluded so the next segment can start
    # exactly at f_centrals[1] without duplicating it.
    fv1 = np.linspace(f_centrals[0], f_centrals[1],
                      int_fwm.nt//4 - 1, endpoint=False)
    df = fv1[1] - fv1[0]
    fv2 = np.linspace(f_centrals[1], f_centrals[2], int_fwm.nt//4)
    try:
        # Sanity check that both segments share the same step.
        # NOTE(review): exact float equality; a mismatch is only printed,
        # not raised -- confirm this leniency is intended.
        assert df == fv2[1] - fv2[0]
    except AssertionError:
        print(df, fv2[1] - fv2[0])
    # Extension segments below (fv0) and above (fv3) the bands, filled
    # outward one step at a time from the band edges.
    fv0, fv3 = np.zeros(int_fwm.nt//4 + 1), np.zeros(int_fwm.nt//4)
    fv0[-1] = fv1[0] - df
    fv3[0] = fv2[-1] + df
    for i in range(1, len(fv3)):
        fv3[i] = fv3[i - 1] + df
    for i in range(len(fv0) - 2, -1, -1):
        fv0[i] = fv0[i + 1] - df
    # Adjacent segments must not share any grid point.
    # NOTE(review): the compared arrays have different lengths, so these
    # elementwise comparisons rely on legacy NumPy broadcasting behaviour;
    # newer NumPy versions may raise here -- verify.
    assert not(np.any(fv0 == fv1))
    assert not(np.any(fv1 == fv2))
    assert not(np.any(fv2 == fv3))
    fv = np.concatenate((fv0, fv1, fv2, fv3))
    # All three band centres must lie exactly on the grid.
    for i in range(3):
        assert f_centrals[i] in fv
    check_ft_grid(fv, df)
    # Index of the grid point(s) closest to the pump frequency.
    p_pos = np.where(np.abs(fv - fp) == np.min(np.abs(fv - fp)))[0]
    return fv, p_pos, f_centrals
def _eval_changepoints(estimated, changepoints, tolerance_delay, both):
    """Match estimated change-points against the true ones.

    Each true change-point consumes at most one estimate inside its
    tolerance window (matched estimates are marked -1 so they cannot be
    reused); any remaining estimates count as false positives.

    Returns:
        float, float, float: F1 score, precision, recall
    """
    remaining = np.asarray(estimated)
    TP = 0
    for c in changepoints:
        # Window of benefit: symmetric around c when both=True,
        # only after c otherwise.
        lower = c - tolerance_delay if both else c
        matched = np.where(
            (remaining >= lower) & (remaining <= c + tolerance_delay),
            -1, remaining)
        if not (matched == remaining).all():
            TP += 1
        remaining = matched
    FN = len(changepoints) - TP
    FP = len(np.where(remaining != -1)[0])
    precision = TP / (TP + FP) if (TP + FP) > 0 else 0
    recall = TP / (TP + FN) if (TP + FN) > 0 else 0
    if precision == 0 and recall == 0:
        F1_score = 0
    else:
        F1_score = 2 * precision * recall / (precision + recall)
    return F1_score, precision, recall


def calc_F1_score(scores, changepoints, tolerance_delay, tuned_threshold=None, div=500, both=True):
    """
    Calculate F1 score. If tuned_threshold is None, tune the threshold and
    return the metrics together with it.

    Args:
        scores: the change scores or change sign scores
        changepoints: changepoints or starting points of gradual changes
        tolerance_delay: tolerance delay for change or change sign detection
        tuned_threshold: the tuned threshold. If it is None, tune the
            threshold and return metrics with it.
        div: number of thresholds sampled between min and max score
        both: if True the benefit window is symmetric around each true
            change-point; if False it only extends after it

    Returns:
        float, float, float[, float]: F1 score, precision, recall
        (and the tuned threshold when tuned_threshold was None)
    """
    # basic statistics
    score_max = np.nanmax(scores)
    score_min = np.nanmin(scores)
    _scores = np.nan_to_num(scores, nan=score_min)  # fill nan
    # Bug fix: identity comparison for None ("== None" -> "is None").
    if tuned_threshold is None:
        F1_scores = np.zeros(div)
        precisions = np.zeros(div)
        recalls = np.zeros(div)
        thresholds = np.zeros(div)
        for i in range(div):
            # Thresholds sweep linearly from score_min to score_max.
            threshold = score_min + i * (score_max - score_min) / (div - 1)
            alarms = np.where(_scores >= threshold, 1, 0)
            # Force alarm-free endpoints so every alarm run has a start.
            alarms[0] = 0
            alarms[-1] = 0
            diff = np.diff(alarms)
            # The start index of each alarm run is the estimated change-point.
            estimated = np.where(diff == 1)[0]
            F1_scores[i], precisions[i], recalls[i] = _eval_changepoints(
                estimated, changepoints, tolerance_delay, both)
            thresholds[i] = threshold
        max_F1_score = np.max(F1_scores)
        # Among all thresholds achieving the best F1, pick the middle one.
        idx = np.where(F1_scores == max_F1_score)[0]
        middle_idx = int(np.mean(idx))
        return (max_F1_score, precisions[middle_idx], recalls[middle_idx],
                thresholds[middle_idx])
    # Fixed-threshold evaluation (no endpoint zeroing, as in the original).
    alarms = np.where(_scores >= tuned_threshold, 1, 0)
    diff = np.diff(alarms)
    estimated = np.where(diff == 1)[0]  # change-points
    return _eval_changepoints(estimated, changepoints, tolerance_delay, both)
def mul_pt_exn(pt, curve, k):
    """Computes point kP given point P, curve and k using Montgomery Ladder.

    Args:
        pt (tuple(int, int)): Point P.
        curve (tuple(int, int, int)): Curve.
        k (int): Multiplier.

    Raises:
        InverseNotFound: Thrown when point kP is the point at infinity.

    Returns:
        tuple(int, int): Point kP.

    NOTE(review): `check`, `dbl_pt` and `add_pt` are module-level helpers
    defined elsewhere in this file.
    """
    # Handle small / negative multipliers directly.
    if k <= 2:
        if k < 0:
            # x and z coordinates are the same for P and -P.
            return mul_pt_exn(pt, curve, -k)
        if k == 0:
            # InverseNotFound will be thrown
            return check((0, 0), curve)
        if k == 1:
            return check(pt, curve)
        # k == 2
        return check(dbl_pt(pt, curve), curve)
    # Montgomery ladder invariant: after processing the top bits m of k,
    # res0 == m*P and res1 == (m+1)*P. The most significant bit is
    # consumed by the initialisation below, so start at bit_length - 2.
    res0 = pt
    res1 = dbl_pt(pt, curve)
    j = k.bit_length() - 2
    while j >= 1:
        if (k >> j) % 2 == 1:
            # Bit 1: res0 <- res0 + res1, res1 <- 2*res1.
            res0 = add_pt(res1, res0, pt, curve)
            res1 = dbl_pt(res1, curve)
        else:
            # Bit 0: res1 <- res0 + res1, res0 <- 2*res0.
            res1 = add_pt(res1, res0, pt, curve)
            res0 = dbl_pt(res0, curve)
        j -= 1
    # Final (least-significant) bit of k.
    if k % 2 == 1:
        res0 = add_pt(res1, res0, pt, curve)
    else:
        res0 = dbl_pt(res0, curve)
    return check(res0, curve)
def get_enter_room_url():
    """
    swagger-doc: 'schedule'

    required: []

    req:
      course_schedule_id:
        description: 'course schedule id'
        type: 'string'
    res:
      url:
        description: 'access URL'
        type: ''
    """
    course_schedule_id = request.json['course_schedule_id']
    with session_scope(db) as session:
        courseschedule = session.query(CourseSchedule).filter_by(id=course_schedule_id).one_or_none()
        if courseschedule is None:
            return jsonify({
                "error": "not found course_schedule: {0}".format(
                    course_schedule_id)
            }), 500
        courseclassroom = session.query(CourseClassroom).filter_by(course_schedule_id=courseschedule.id).one_or_none()
        if courseclassroom is None:
            # Bug fix: the old message claimed a classroom was *found*;
            # this branch actually means none exists for the schedule.
            return jsonify({
                "error": "not found courseclassroom in {0}".format(
                    course_schedule_id)
            }), 500
        # Default role is a silent observer; interviews are joined as teacher.
        u = ClassroomRoleEnum.SIT_IN.name
        if courseschedule.schedule_type.name == 'INTERVIEW':
            u = ClassroomRoleEnum.TEACHER.name
        current_app.logger.debug('nickName--1212-----' + str(courseschedule.schedule_type))
        # Build the room-entry URL for the current user (always PC device).
        url = live_service.enter_room(
            getattr(g, current_app.config['CUR_USER'])['username'],
            courseclassroom.room_id,
            getattr(g, current_app.config['CUR_USER'])['name'],
            u,
            ClassroomDeviceEnum.PC.name)
        return jsonify({'url': url})
def get_scene_nodes():
    """
    Collect all nodes in the current scene as GamEX nodes.

    :return: tuple(list<gx.dcc.DCCNode>, int) -- the nodes and their count
    """
    collected = []
    _append_children(get_root(), collected)
    return collected, len(collected)
import pdb
def interpolate(src_codes, dst_codes, step=5):
    """Interpolates two sets of latent codes linearly.

    Args:
        src_codes: Source codes, with shape [num, *code_shape].
        dst_codes: Target codes, with shape [num, *code_shape].
        step: Number of interpolation steps, with source and target included. For
            example, if `step = 5`, three more samples will be inserted. (default: 5)

    Returns:
        Interpolated codes, with shape [num, step, *code_shape].

    Raises:
        ValueError: If the input two sets of latent codes are with different shapes.
    """
    if not (src_codes.ndim >= 2 and src_codes.shape == dst_codes.shape):
        raise ValueError(
            f"Shapes of source codes and target codes should both be "
            f"[num, *code_shape], but {src_codes.shape} and "
            f"{dst_codes.shape} are received!"
        )
    num = src_codes.shape[0]
    code_shape = src_codes.shape[1:]
    # Insert a `step` axis after the batch axis on both endpoints.
    a = src_codes[:, np.newaxis]
    b = dst_codes[:, np.newaxis]
    # Interpolation weights in [0, 1], shaped to broadcast along the step axis.
    l = np.linspace(0.0, 1.0, step).reshape(
        [step if axis == 1 else 1 for axis in range(a.ndim)]
    )
    results = a + l * (b - a)
    assert results.shape == (num, step, *code_shape)
    # Bug fix: removed a leftover pdb.set_trace() that froze every call.
    return results
def get_flavored_transforms(penalty, kind):
    """
    Gets the transformation functions for all flavored penalties.

    Parameters
    ----------
    penalty: PenaltyConfig, PenaltyTuner
        The penalty configs/tuner whose flavored penalties we want.

    kind: str
        Which kind of flavor we want; ['adaptive', 'non_convex']

    Output
    ------
    transforms, flavors, flavor_keys

    transforms: list of callable(coef)

    flavors: list of FlavorConfigs

    flavor_keys: list of str

    NOTE(review): relies on module-level helpers (build_penalty_tree,
    extract_penalties, extract_flavors_and_pens, get_non_smooth_transforms,
    extract_grouping_info, get_ancestor_keys, wrap_group_subsetting,
    get_enet_sum_name, ElasticNetConfig) defined elsewhere in this package.
    """
    # Flatten the penalty tree into a list of penalties and their keys.
    tree = build_penalty_tree(penalty)
    penalties, penalty_keys = extract_penalties(tree)
    #########################
    # get requested flavors #
    #########################
    flavors, flavor_keys, parent_pens = \
        extract_flavors_and_pens(penalties=penalties,
                                 penalty_keys=penalty_keys,
                                 restrict=kind)
    ############################################
    # get transforms for each flavored penalty #
    ############################################
    # pull out the transform for each flavor
    transforms = []
    for flav_key, parent_pen in zip(flavor_keys, parent_pens):
        # for elastic net we need to figure out which term
        # in the sum this flavor is
        if isinstance(parent_pen, ElasticNetConfig):
            # get the config for the enet summand corresponding
            # to this flavor config
            sum_name = get_enet_sum_name(flav_key)
            sum_config = parent_pen.get_sum_configs(sum_name)
            transf = get_non_smooth_transforms(sum_config)
        else:
            transf = get_non_smooth_transforms(parent_pen)
        transforms.append(transf)
    ###########################################
    # add group subsetting to transformations #
    ###########################################
    # get keys of penalties that are Children of SeparableSum penalties
    # and the corresponding groups
    CoSS_keys, groups = extract_grouping_info(penalties=penalties,
                                              keys=penalty_keys)
    # if there are no group subsetting functions then we are done!
    if len(groups) == 0:
        return transforms, flavors, flavor_keys
    # map keys to groups
    group_map = {k: groups[idx] for idx, k in enumerate(CoSS_keys)}
    # get the ancestor keys among the CoSS_keys for each flavored key
    flav_groups = {}
    for flav_key in flavor_keys:
        # get ancestors
        ancestors = get_ancestor_keys(key=flav_key,
                                      candidates=CoSS_keys)
        # ancestor groups starting with the oldest ancestors
        flav_groups[flav_key] = [group_map[a] for a in ancestors]
    # wrap the transforms in a subsetting function so each transform only
    # sees the coefficient entries belonging to its penalty's group(s)
    subsetted_transforms = []
    for i, flav_key in enumerate(flavor_keys):
        new_transf = wrap_group_subsetting(func=transforms[i],
                                           groups=flav_groups[flav_key])
        subsetted_transforms.append(new_transf)
    return subsetted_transforms, flavors, flavor_keys
def f(spam, eggs):
    """Return both arguments unchanged, as a tuple.

    :type spam: list of string
    :type eggs: (bool, int, unicode)
    """
    return (spam, eggs)
def GetAllowedGitilesConfigs():
    """Returns the set of valid gitiles configurations.

    The returned structure contains the tree of valid hosts, projects, and refs.

    Please note that the hosts in the config are gitiles hosts instead of gerrit
    hosts, such as: 'chromium.googlesource.com'.

    Example config:
    {
      'allowed_gitiles_configs': {
        'chromium.googlesource.com': {
          'chromium/src': [
            'refs/heads/main',
          ]
        }
      }
    }
    """
    settings = waterfall_config.GetCodeCoverageSettings()
    return settings.get('allowed_gitiles_configs', {})
import requests
def cbsodatav3_to_gcs(id, third_party=False, schema="cbs", credentials=None, GCP=None, paths=None):
    """Load CBS odata v3 into Google Cloud Storage as Parquet.

    For given dataset id, the following tables are uploaded into schema
    (taking `cbs` as default and `83583NED` as example):
        - ``cbs.83583NED_DataProperties``: description of topics and dimensions contained in table
        - ``cbs.83583NED_DimensionName``: separate dimension tables
        - ``cbs.83583NED_TypedDataSet``: the TypedDataset
        - ``cbs.83583NED_CategoryGroups``: grouping of dimensions

    See `Handleiding CBS Open Data Services (v3)
    <https://www.cbs.nl/-/media/statline/documenten/handleiding-cbs-opendata-services.pdf>`_
    for details.

    Args:
        - id (str): table ID like `83583NED`
        - third_party (boolean): 'opendata.cbs.nl' is used by default (False).
          Set to True for dataderden.cbs.nl
        - schema (str): schema to load data into
        - credentials: GCP credentials
        - GCP: config object
        - paths: config object for output directory

    Return:
        - Set: Paths to Parquet files
        - String: table_description
    """
    base_url = {
        True: f"https://dataderden.cbs.nl/ODataFeed/odata/{id}?$format=json",
        False: f"https://opendata.cbs.nl/ODataFeed/odata/{id}?$format=json",
    }
    urls = {
        item["name"]: item["url"]
        for item in requests.get(base_url[third_party]).json()["value"]
    }
    output = {
        True: paths.root / paths.tmp,
        False: paths.root / paths.cbs,
    }
    # Getting the description of the data set.
    data_set_description = table_description(urls["TableInfos"])
    gcs = storage.Client(project="dataverbinders")
    gcs_bucket = gcs.bucket(GCP.bucket)
    files_parquet = set()
    # TableInfos is redundant --> use https://opendata.cbs.nl/ODataCatalog/Tables?$format=json
    # UntypedDataSet is redundant --> use TypedDataSet
    for key, url in [
        (k, v) for k, v in urls.items() if k not in ("TableInfos", "UntypedDataSet")
    ]:
        url = "?".join((url, "$format=json"))
        table_name = f"{schema}.{id}_{key}"
        pq_dir = f"{output[third_party]}/{table_name}.parquet"
        # Bug fix: track the writer with a None sentinel. Previously, if the
        # first (or every) page had an empty "value" list, pq_writer was
        # never created and the close/upload step raised NameError.
        pq_writer = None
        i = 0
        while url:
            logger = prefect.context.get("logger")
            logger.info(f"Processing {key} (i = {i}) from {url}")
            r = requests.get(url).json()
            # odata api contains empty lists as values --> skip these
            if r["value"]:
                # DataProperties contains column odata.type --> odata_type
                df = pd.DataFrame(r["value"]).rename(
                    columns=lambda s: s.replace(".", "_")
                )
                # Add path of file to set, when data set contains information
                files_parquet.add(f"{table_name}.parquet")
                cbs_table = pa.Table.from_pandas(df)
                if pq_writer is None:
                    # Append pages to one file instead of overwriting:
                    # https://stackoverflow.com/questions/47113813/using-pyarrow-how-do-you-append-to-parquet-file/47114713
                    # gzip compression with default level 6.
                    pq_writer = pq.ParquetWriter(where=pq_dir, schema=cbs_table.schema,
                                                 compression="gzip", compression_level=6)
                # Write transformed df to the given file (pq_dir)
                pq_writer.write_table(cbs_table)
            # each request limited to 10,000 cells
            if "odata.nextLink" in r:
                i += 1
                url = r["odata.nextLink"]
            else:
                # Last page: finalize and upload only if anything was written.
                if pq_writer is not None:
                    pq_writer.close()
                    # Name of file in GCS.
                    gcs_blob = gcs_bucket.blob(pq_dir.split("/")[-1])
                    # Upload file to GCS from given location.
                    gcs_blob.upload_from_filename(filename=pq_dir)
                url = None
    return files_parquet, data_set_description
def process_chunk_of_genes(packed_args):
    """ Control flow of compute coverage of pangenome per species and write results

    packed_args starts with (species_id, chunk_id). A chunk_id of -1 is the
    sentinel "merge" task: packed_args[2] then carries the number of chunks,
    and the task waits for all per-chunk workers of the species (each of
    which releases the species semaphore once) before merging their output.

    NOTE(review): `semaphore_for_species`, `tsprint`,
    `merge_chunks_per_species` and `compute_coverage_per_chunk` are defined
    elsewhere in this module.
    """
    species_id, chunk_id = packed_args[:2]
    if chunk_id == -1:
        global semaphore_for_species
        num_of_genes_chunks = packed_args[2]
        tsprint(f"  MIDAS2::process_chunk_of_genes::{species_id}-{chunk_id}::wait merge_chunks_per_species")
        # Block until every chunk worker of this species has released once.
        for _ in range(num_of_genes_chunks):
            semaphore_for_species[species_id].acquire()
        tsprint(f"  MIDAS2::process_chunk_of_genes::{species_id}-{chunk_id}::start merge_chunks_per_species")
        ret = merge_chunks_per_species(species_id)
        tsprint(f"  MIDAS2::process_chunk_of_genes::{species_id}-{chunk_id}::finish merge_chunks_per_species")
        return ret
    # Regular task: compute coverage for one chunk of genes.
    tsprint(f"  MIDAS2::process_chunk_of_genes::{species_id}-{chunk_id}::start compute_coverage_per_chunk")
    ret = compute_coverage_per_chunk(species_id, chunk_id)
    tsprint(f"  MIDAS2::process_chunk_of_genes::{species_id}-{chunk_id}::finish compute_coverage_per_chunk")
    return ret
def get_Up(N=16, dt=0.1):
    """Build the [N, N] preview-horizon matrix Up.

    Entry (j, k) with j >= k equals (1/6 + m/2 + m**2/2) * dt**3 where
    m = j - k; entries above the diagonal are zero.

    INPUTS
        N (int): length of the preview horizon
        dt (float): time step size
    OUTPUTS
        Up: size [N, N] matrix
    """
    rows = np.arange(N).reshape(-1, 1)
    cols = np.arange(N).reshape(1, -1)
    m = rows - cols  # sub-diagonal offset of each entry
    Up = np.where(m >= 0, (1 / 6) + m / 2 + (m ** 2) / 2, 0.0)
    Up *= np.power(dt, 3)
    return Up
def print_person(first, last, middle=None):
    """Format a person's full name.

    Despite the name, this returns the formatted string instead of
    printing it.

    Args:
        first (str): This person's first name
        last (str): This person's last name
        middle (str): Optional. This person's middle name; when omitted, a
            double space remains between first and last (original behaviour).
    """
    return '{f} {m} {l}'.format(f=first, m=middle or "", l=last)
def get_governing_regions(strict=True):
    """! Creates a sorted list of governing regions which may simply be
    federal states or intermediate regions which themselves are a real
    subset of a federal state and to which a certain number of counties
    is attributed.

    Governing regions are generally denoted by the first three digits of the
    belonging county IDs. In cases of a trailing zero, only two digits are
    taken and for Rhineland Palatinate and Saxony, the strict definition
    returns the two digit code of the federal state (i.e. 07 and 14).

    Note that this list may include former 'governing regions'. However, this
    function may only be used to equally distribute information which only exists
    on the 'governing region' level but not on county level itself or where the
    county level information seems to be wrong. Then, information is extrapolated
    with the help of governing regions.

    @param strict [Default: True] Defines whether only regions currently
        considered as governing regions are returned.
    @return List of governing regions.
    """
    # Idiom fix: "strict == False" replaced with "not strict".
    if not strict:
        # Take the first three digits, deduplicate via set(), and sort.
        return sorted(set(id[0:3] for id in get_county_ids(zfill=True)))
    # Strict definition: Rhineland Palatinate (07) and Saxony (14), as well
    # as IDs whose third digit is a trailing zero, collapse to the
    # two-digit federal-state code.
    return sorted(
        set(
            id[0:3]
            if
            not (id[0:2] in ['07', '14'])
            and (id[2] != '0') else id[0:2]
            for id in get_county_ids(zfill=True)))
def _chomp_element(base, index, value):
"""Implementation of perl = and chomp on an array element"""
if value is None:
value = ''
base[index] = value.rstrip("\n")
return len(value) - len(base[index]) | 66cfde7c8d8f2c92f0eebb23f717bf50b676ca31 | 3,632,125 |
def write_primitive(group, name, data, ds_kwargs):
    """Write a primitive value as a dataset under *group* and tag its type.

    Note: No dataset chunk options (like compression) for scalar data --
    ds_kwargs are applied to numpy arrays only.

    Args:
        group: h5py-like group exposing ``create_dataset`` -- presumably an
            h5py.Group; confirm against callers.
        name: name of the new dataset.
        data: value to store (ndarray, str, bool, None, or other scalar).
        ds_kwargs (dict): extra dataset options (e.g. compression) used for
            ndarray data.

    Returns:
        The created dataset, with 'data_type'/'collection_type' attrs set.
    """
    data_type = type(data)
    # Write dataset -- dispatch on the *exact* type of `data` (intentional:
    # subclasses are not treated as their base types here).
    if data_type == np.ndarray:
        ds = group.create_dataset(name, data=data, **ds_kwargs) # enable compression for nonscalar numpy array
    elif data_type == str:
        # NOTE(review): np.string_ is removed in NumPy 2.0 (np.bytes_ is the
        # replacement) -- verify the pinned NumPy version.
        ds = group.create_dataset(name, data=np.string_(data))
    elif data_type == bool:
        ds = group.create_dataset(name, data=int(data))
    elif data_type == type(None):
        # None is stored as 0; the original type is recoverable from attrs.
        ds = group.create_dataset(name, data=0)
    else:
        ds = group.create_dataset(name, data=data)
    # Write attrs -- `write_attrs` is a module-level helper (not shown here).
    write_attrs(ds, {'data_type': data_type, 'collection_type': 'primitive'})
    return ds
def measurecrime(gps, radius):
    """Measures crime around a given location"""
    latitude, longitude = gps
    # Bounding box of side 2*radius centred on the location.
    minlat, maxlat = latitude - radius, latitude + radius
    minlong, maxlong = longitude - radius, longitude + radius
    baseurl = (DATABASE + WHERE + LAT + GT + str(minlat) + AND + LAT + LT +
               str(maxlat) + AND + LONG + GT + str(minlong) + AND + LONG + LT +
               str(maxlong) + AMP + KEY + AMP + FBI_CODE)
    # Weighted sum of incident counts per crime classification.
    return sum(
        CLASSIFICATION[code] * countrows(baseurl + code)
        for code in CLASSIFICATION.keys()
    )
def _weights(name, shape, mean=0.0, stddev=0.02):
    """ Helper to create an initialized Variable

    Args:
        name: name of the variable
        shape: list of ints
        mean: mean of a Gaussian
        stddev: standard deviation of a Gaussian

    Returns:
        A trainable variable
    """
    initializer = tf.random_normal_initializer(
        mean=mean, stddev=stddev, dtype=tf.float32)
    return tf.get_variable(name, shape, initializer=initializer)
def packed_function(function):
    """Return a single-input version of *function*.

    Needed in Python 3.x since lambda parameters are no longer
    automatically unpacked as they were in Python 2.7.  Callables taking
    more than one positional argument are wrapped with ``pack``;
    anything else is returned unchanged.
    """
    code = getattr(function, '__code__', None)
    if code is not None and code.co_argcount > 1:
        return pack(function)
    return function
from bs4 import BeautifulSoup, element
import re
async def read_ratings(session, archive_url, archive_timestamp, archive_content):
    """
    Extract a movie rating from an archived IMDb page.

    Tries several page layouts in turn: the modern ``ratingValue`` span,
    two legacy layouts, and finally any bare "<float>/10" text.

    :param session: unused here; kept for call-site compatibility.
    :param archive_url: URL of the archived page (unused here).
    :param archive_timestamp: archive snapshot timestamp (unused here).
    :param archive_content: raw HTML of the archived page.
    :raise: A ScrapeError if the rating could not be extracted or parsed.
    :return: the rating as a float.
    """
    try:
        soup = BeautifulSoup(archive_content, 'html.parser')
        ratings_element = soup.find('span', itemprop="ratingValue")
        if ratings_element is not None and ratings_element.string != '-':
            # Some locales use a comma as the decimal separator.
            return float(ratings_element.string.replace(',', '.'))
        ratings_element = soup.find('div', class_="star-box-giga-star")
        if ratings_element is not None:
            return float(ratings_element.string)
        ratings_element = soup.find('span', class_="rating-rating")
        if ratings_element is not None:
            # BUGFIX: ``element`` was referenced without being imported
            # (only BeautifulSoup was), so this branch raised NameError
            # instead of parsing the rating.
            if type(ratings_element.contents[0]) is element.NavigableString:
                return float(ratings_element.contents[0].string)
            else:
                return float(ratings_element.span.string)
        # Fallback, find a string matching "float/10".
        # Raw string avoids the invalid-escape-sequence warning on \d \. .
        ratings_ovr_ten = soup.find(string=re.compile(r"^[\d\.]+/10$"))
        if ratings_ovr_ten is not None:
            return float(ratings_ovr_ten.string.split('/')[0])
        raise ScrapeError('Ratings not found')
    except ValueError:
        raise ScrapeError('Not a valid number')
import time
def generate_nonce():
    """
    Generate a nonce for request signing.

    Returns:
        nonce (int): current epoch timestamp shifted 100 seconds ahead.
    """
    return int(100 + time.time())
def dense(x, inp_dim, out_dim, name = 'dense'):
    """Fully-connected layer computing ``x @ W + b``.

    :param x: input tensor of shape [batch_size, inp_dim].
    :param inp_dim: no. of input neurons.
    :param out_dim: no. of output neurons.
    :param name: variable-scope name for the whole layer.
    :return: tensor with shape [batch_size, out_dim].
    """
    with tf.compat.v1.variable_scope(name, reuse=None):
        w = tf.compat.v1.get_variable(
            "weights", shape=[inp_dim, out_dim],
            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.compat.v1.get_variable(
            "bias", shape=[out_dim],
            initializer=tf.constant_initializer(0.0))
        return tf.add(tf.matmul(x, w), b, name='matmul')
def read_observation_time_range(observation_id):
    """Get the time range of values for an observation.

    Parameters
    ----------
    observation_id: string
        UUID of the associated observation.

    Returns
    -------
    dict
        With `min_timestamp` and `max_timestamp` keys that are either
        dt.datetime objects or None.
    """
    procedure_name = 'read_observation_time_range'
    return _call_procedure_for_single(procedure_name, observation_id)
import ray
def compute_mean_image(batches):
    """Compute the mean image over a list of batches of images.

    Args:
        batches (List[ObjectID]): A list of batches of images.

    Returns:
        ndarray: The mean image, as float64.

    Raises:
        ValueError: If no batches were supplied.
    """
    if not batches:
        # ValueError (a subclass of Exception) is the idiomatic error for
        # an invalid argument; existing ``except Exception`` callers still
        # catch it.
        raise ValueError("No images were passed into `compute_mean_image`.")
    # Sum each batch remotely, then combine the partial sums locally.
    sum_image_ids = [ra.sum.remote(batch, axis=0) for batch in batches]
    n_images = num_images.remote(batches)
    return np.sum(ray.get(sum_image_ids), axis=0).astype("float64") / ray.get(n_images)
def get_sevt(r: Request, resp: Response):
    """
    SEVT web server route for GET requests.

    :param r: Request object, provides access to method, headers & cookies.
    :param resp: Response object used for modification of the status code.
    :return: the content of the internal resource's response, or None
        (with 401 set on *resp*) when the token check fails.
    """
    if not check_token(r.headers):
        resp.status_code = status.HTTP_401_UNAUTHORIZED
        return
    resp.status_code = status.HTTP_200_OK
    return response_json.response
def mse(actual, predicted):
    """
    Mean squared error: the mean of (actual - predicted) squared.

    https://ml-cheatsheet.readthedocs.io/en/latest/linear_regression.html#cost-function
    """
    residual = actual - predicted
    return np.mean(residual * residual)
import sys
def get_type_hints(fn):
  """Gets the type hint associated with an arbitrary object fn.
  Always returns a valid IOTypeHints object, creating one if necessary.

  The hints are cached on ``fn._type_hints`` when *fn* accepts new
  attributes; objects that reject attribute assignment get a fresh,
  uncached IOTypeHints on every call.
  """
  # pylint: disable=protected-access
  if not hasattr(fn, '_type_hints'):
    try:
      # Cache on the object itself so subsequent lookups reuse one instance.
      fn._type_hints = IOTypeHints()
    except (AttributeError, TypeError):
      # Can't add arbitrary attributes to this object,
      # but might have some restrictions anyways...
      hints = IOTypeHints()
      # Python 3.7 introduces annotations for _MethodDescriptorTypes.
      if isinstance(fn, _MethodDescriptorType) and sys.version_info < (3, 7):
        hints.set_input_types(fn.__objclass__)
      return hints
  return fn._type_hints
  # pylint: enable=protected-access
import tempfile
import os
def notebook_to_md(notebook):
    """Convert a notebook to its Markdown representation, using Pandoc."""
    # delete=False: the file must survive past close() so Pandoc can
    # read and rewrite it by name.
    with tempfile.NamedTemporaryFile(delete=False) as nb_file:
        nb_file.write(ipynb_writes(notebook).encode('utf-8'))
    pandoc(u'--from ipynb --to markdown -s --atx-headers --wrap=preserve --preserve-tabs',
           nb_file.name, nb_file.name)
    with open(nb_file.name, encoding='utf-8') as converted:
        text = converted.read()
    os.unlink(nb_file.name)
    return '\n'.join(text.splitlines())
import os
def is_windows() -> bool:  # pragma: no cover
    """
    Return True when the host operating system is Windows
    (i.e. ``os.name`` reports ``"nt"``).
    """
    return "nt" == os.name
def topsorted(outputs):
    """
    Topological sort via non-recursive depth-first search

    Returns a list containing every node reachable (via ``get_parents``)
    from *outputs*, with each node appearing after all of its parents.
    Raises ValueError("not a dag") when a cycle is detected.
    ``get_parents`` is defined elsewhere; presumably it returns the
    direct predecessor nodes of a node -- TODO confirm.
    """
    assert isinstance(outputs, (list, tuple))
    marks = {}
    out = []
    stack = [] # pylint: disable=W0621
    # i: node
    # jidx = number of children visited so far from that node
    # marks: state of each node, which is one of
    #   0: haven't visited
    #   1: have visited, but not done visiting children
    #   2: done visiting children
    for x in outputs:
        stack.append((x, 0))
        while stack:
            (i, jidx) = stack.pop()
            if jidx == 0:
                # First time we see this node on the stack: classify it.
                m = marks.get(i, 0)
                if m == 0:
                    marks[i] = 1
                elif m == 1:
                    # Re-entered a node still being expanded => cycle.
                    raise ValueError("not a dag")
                else:
                    # Already fully processed; skip.
                    continue
            ps = get_parents(i)
            if jidx == len(ps):
                # All parents emitted; the node itself can be emitted now,
                # which guarantees parents precede children in `out`.
                marks[i] = 2
                out.append(i)
            else:
                # Re-push self with the next parent index, then descend.
                stack.append((i, jidx + 1))
                j = ps[jidx]
                stack.append((j, 0))
    return out
import pisa.utils.log as log
import sys
import pkg_resources
def open_resource(resource, mode='r'):
    """Find the resource file (see find_resource), open it, and return a file
    handle.
    Parameters
    ----------
    resource : str
        Resource path; can be path relative to CWD, path relative to
        PISA_RESOURCES environment variable (if defined), or a package resource
        location relative to PISA's `pisa_examples/resources` sub-directory.
        Within each path specified in PISA_RESOURCES and within the
        `pisa_examples/resources` dir, the sub-directories 'data', 'scripts',
        and 'settings' are checked for `resource` _before_ the base directories
        are checked. Note that the **first** result found is returned.
    mode : str
        'r', 'w', or 'rw'; only 'r' is valid for package resources (as these
        cannot be written)
    Returns
    -------
    binary stream object (which behaves identically to a file object)
    See Also
    --------
    find_resource
        Locate a file or directory (in fileystem) or a package
        resource
    find_path
        Locate a file or directory in the filesystem
    Open a (file) package resource and return stream object.
    Notes
    -----
    See help for pkg_resources module / resource_stream method for more details
    on handling of package resources.
    """
    # NOTE: this import needs to be here -- and not at top -- to avoid circular
    # imports
    log.logging.trace('Attempting to open resource "%s"', resource)
    # 1) Check for file in filesystem at absolute path or relative to
    # PISA_RESOURCES environment var
    # Save the filesystem failure so it can be re-raised later if the
    # package lookup below also fails.
    fs_exc_info = None
    try:
        resource_path = find_path(resource, fail=True)
    except IOError:
        fs_exc_info = sys.exc_info()
    else:
        log.logging.debug('Opening resource "%s" from filesystem at "%s"',
                          resource, resource_path)
        return open(resource_path, mode=mode)
    # 2) Look inside the installed pisa package; this should error out if not
    # found
    log.logging.trace('Searching package resources...')
    pkg_exc_info = None
    for subdir in RESOURCES_SUBDIRS + [None]:
        # Try each known sub-directory first; `None` means the bare path.
        if subdir is None:
            augmented_path = resource
        else:
            augmented_path = '/'.join([subdir, resource])
        try:
            resource_spec = ('pisa_examples', 'resources/' + augmented_path)
            stream = pkg_resources.resource_stream(*resource_spec)
            # TODO: better way to check if read mode (i.e. will 'r' miss
            # anything that can be specified to also mean "read mode")?
            if mode.strip().lower() != 'r':
                del stream
                raise IOError(
                    'Illegal mode "%s" specified. Cannot open a PISA package'
                    ' resource in anything besides "r" (read-only) mode.' %mode
                )
        except IOError:
            pkg_exc_info = sys.exc_info()
        else:
            log.logging.debug('Opening resource "%s" from PISA package.',
                              resource)
            return stream
    # Neither lookup succeeded: re-raise whichever error applies (a joint
    # message when both the filesystem and the package lookups failed).
    if fs_exc_info is not None:
        if pkg_exc_info is not None:
            msg = ('Could not locate resource "%s" in filesystem OR in'
                   ' installed PISA package.' %resource)
            raise IOError(msg)
        raise fs_exc_info[0](fs_exc_info[1]).with_traceback(fs_exc_info[2])
    raise pkg_exc_info[0](pkg_exc_info[1]).with_traceback(pkg_exc_info[2])
from scipy.spatial.distance import cdist
def merge_small_enclosed_subcavs(subcavs, minsize_subcavs = 50, min_contacts = 0.667, v = False):
    """
    The watershed algorithm tends to overspan a bit, even when optimizing seeds.
    This function aims at identifying small pockets (< minsize_subcavs)
    that are heavily in contact with other subcavs (> min_contacts)
    These subcavites are probably at the interface between 2 main subcavities,
    or on their surface.

    Parameters
    ----------
    subcavs : list of ndarray
        One (N_i, 3) coordinate array per subcavity; presumably grid points
        on a ~1 A spacing (the contact test uses cdist < 1.01) -- TODO confirm.
    minsize_subcavs : int
        Subcavities with fewer points than this are merge candidates.
    min_contacts : float
        Minimum fraction of a small subcavity's points touching other
        subcavities for it to be merged into its main neighbor.
    v : bool
        Verbose flag.

    Returns
    -------
    list of ndarray
        The subcavities, with qualifying small ones merged into the
        neighbor they touch most.
    """
    # Create a copy of the subcavs array to not change in place, in case
    _subcavs = subcavs.copy()
    # lengths of each subcavity
    lengths = [len(x) for x in subcavs]
    #Smaller ones than min_contacts
    smallsubcavs = [[x, lengths[x]] for x in range(0, len(lengths)) if lengths[x] < minsize_subcavs]
    if not smallsubcavs:
        return subcavs
    to_del = {}
    for small in smallsubcavs:
        contacts = []
        contact = 0.
        total_contact = 0.
        i = 0
        # Check the contact between the small subcavity and the others
        for other_subcavs in subcavs:
            if i == small[0]:
                # A subcavity is not "in contact" with itself.
                contacts.append(0)
                i+=1
                continue
            # Number of points of the small subcavity within 1.01 of any
            # point of the other subcavity.
            contact = len(set(np.nonzero(cdist(subcavs[small[0]], other_subcavs) < 1.01)[0]))
            contacts.append(contact)
            total_contact += contact/small[1]
            i+=1
        # If a small subcavity has more than min_contacts with neighbors, be ready to add it to the
        # subcavity with which it has the more contacts
        if total_contact >= min_contacts:
            if v == True: print(f"Subcavity {small[0]} is small and enclosed {total_contact*100:.2f}% in other subcavs.\nIt will be added to subcavity {np.argmax(contacts)} (original numbering of subcavs from 0)")
            to_del[small[0]] = np.argmax(contacts)
    # to_del is dict which contains key = index of subcav to delete; value = index of subcav to merge it with
    # If there's any subcavities to merge
    # It's a mess because it's not easy to merge different array elements together and/or delete some...
    if to_del:
        dels = []
        for index in range(0, len(subcavs)):
            if index in to_del.keys():
                # original version: works generally, except in some cases in which we merge and replace 2 equally sized small arrays (memory issue?)
                try:
                    # _tmp contains the merged subcavity
                    _tmp = np.concatenate((_subcavs[to_del[index]], _subcavs[index]), axis = 0)
                    # now we assign the merged subcavity to the index of the main subcav
                    _subcavs[to_del[index]] = _tmp
                # dirty work around: change to lists, and play with lists, and come back to array...
                except:
                    _tmp = np.array(_subcavs[to_del[index]]).tolist() + np.array(_subcavs[index]).tolist()
                    _subcavs[to_del[index]] = np.array(_tmp)
                dels.append(index)
        subcavlist = []
        for x in _subcavs:
            # there is also here a mix of lists and arrays. Why?
            try:
                subcavlist.append(x.tolist())
            except:
                subcavlist.append(x)
        # Delete merged-away entries from the end so indices stay valid.
        for _del in sorted(dels, reverse=True):
            del subcavlist[_del]
        merged_subcavs = [np.array(x) for x in subcavlist]
        return merged_subcavs
    else:
        return subcavs
import ast
def is_py3(file_path):
    """Check if code is Python3 compatible.

    https://stackoverflow.com/a/40886697

    Args:
        file_path: path of the Python source file to check.

    Returns:
        bool: True when the file parses as Python 3 syntax.
    """
    # ``with`` guarantees the handle is closed; the previous version
    # leaked an open file object.
    with open(file_path, "rb") as source:
        code_data = source.read()
    try:
        ast.parse(code_data)
    except SyntaxError:
        return False
    return True
import time
def getDate():
    """Return the current local time as a ``time.struct_time``."""
    return time.localtime()
import os
def modify_rsp(rsp_entries, other_rel_path, modify_after_num):
    """Create a modified rsp file for use in bisection.

    Returns a new list built from *rsp_entries*: every path entry after
    the first *modify_after_num* path entries gets *other_rel_path*
    prepended.  Asserts there were at least that many path entries.
    """
    modified = []
    remaining = modify_after_num
    for entry in rsp_entries:
        if is_path(entry):
            if remaining == 0:
                entry = os.path.join(other_rel_path, entry)
            else:
                remaining -= 1
        modified.append(entry)
    assert remaining == 0
    return modified
def wordEncrypt(word):
    """
    Encrypt a word into a list of numeric keys using the cipher in
    file_cipher.

    Input
    -----
    word : string

    Output
    ------
    list with numeric keys; the last two entries are the random keys
    used for the base expansion.

    Notes
    -----
    Part of the JAMS Python package (MIT License); originates from the
    former UFZ Python library.  Copyright (c) 2014 Matthias Cuntz.
    Written Dec 2014, modified from
    http://code.activestate.com/recipes/577954-encrypt-and-decrypt-text-and-text-files-beta/
    The encrypted word can be turned back into text with wordDecrypt.
    """
    cipher_text = open(file_cipher).read() + '\n'
    positions_per_char = find(cipher_text, list(word))
    keys = [randint(5001, 7000), randint(2, 5000)]
    # Pick one random cipher position per character, then expand in the
    # randomly chosen base pair.
    encrypted = baseExpansion([choice(p) for p in positions_per_char],
                              keys[0], keys[1])
    encrypted.extend(keys)
    return [int(value) for value in encrypted]
def is_special_identifier_char(c):
    """
    Returns `True` iff character `c` should be escaped in an identifier
    (i.e. it is a special character).
    """
    special_chars = {
        ESCAPEMENT_SYM, OLD_COMMENT_SYM, FILE_INCLUSION_SYM, UNIT_START_SYM,
        UNIT_END_SYM, ALIAS_SYM, SLOT_SYM, INTENT_SYM,
        CHOICE_START, CHOICE_END, CHOICE_SEP, OLD_CHOICE_START,
        OLD_CHOICE_END, OLD_CHOICE_SEP, CASE_GEN_SYM, RAND_GEN_SYM,
        ARG_SYM, VARIATION_SYM,
    }
    return c in special_chars
def delete_board(request):
    """
    Remove a saved game board from a user's profile.

    The user must be authenticated (token must match the account) and
    the game identified by game_id must exist in the profile.

    user_id: unique user identifier (same as username).
    token: authentication token that allows access to the user's account.
    game_id: ID of the saved game.

    :param request: POST request with fields 'user_id', 'game_id', 'token'.
    :return: success message, or error status.
    """
    data = request.data
    # Exactly these three fields must be present -- nothing more, nothing less.
    if set(data.keys()) != {'user_id', 'game_id', 'token'}:
        return Response({'error': 'Missing required fields!'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Reject values containing disallowed characters.
    if any(check_special_characters(str(data[field]))
           for field in ('user_id', 'game_id', 'token')):
        return Response({'error': 'Unaccepted character passed!'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Verify that user_id matches the token in the database.
    if not db.check_user(data['user_id'], data['token']):
        return Response({'error': 'UNAUTHORIZED'},
                        status=status.HTTP_401_UNAUTHORIZED)
    # Delete the game board from the user's saved profile.
    if not db.delete_game(data['user_id'], data['game_id']):
        return Response({'error': 'Error when deleting the game!'},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return Response({'status': 'success'})
import os
def showPlot(surface, save = True, folderName = '', fileName = '', file_format = 'PNG', showImage = True):
    """
    Display the diagram and save it to the local.
    Args:
        surface: skia.Surface.
        fileName: str-the name for the generated file: either the input filename or
        temp.png if '' (default) in order to show the plots only instead of saving files.
        fileFormat = 'PNG' (default) or 'JPEG'.
        folderName = name for the folder to save the images
    Returns:
        the drew image array
    """
    # Make sure the destination folder exists before writing anything.
    if folderName:
        if not os.path.exists(os.getcwd() + '/' + folderName):
            os.makedirs(os.getcwd() + '/' + folderName)
    if fileName == '':
        #random_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
        #tmpfileName = os.path.join(os.getcwd() + '/' + folderName, random_string)
        #shows the plot only instead of saving the files
        tmpfileName = 'temp'
        image = surface.makeImageSnapshot()
        if save:
            tmpfileName = tmpfileName + '.png'
            image.save(tmpfileName, skia.kPNG)
        if showImage:
            # NOTE(review): when save is False this opens 'temp' from a
            # previous call (or fails) -- the snapshot itself is never
            # displayed directly; confirm intended.
            pil_im = Image.open(tmpfileName)
            display(pil_im)
        #pil_im.show()
        #self.surface.write_to_png(tmpfileName)
    else:
        fileName = os.path.join(os.getcwd() + '/' + folderName,fileName)
        image = surface.makeImageSnapshot()
        if save:
            if file_format == 'PNG':
                fileName = fileName + '.png'
                image.save(fileName, skia.kPNG)
                if showImage:
                    pil_im = Image.open(fileName)
                    display(pil_im)
            elif file_format == 'JPEG':
                fileName = fileName + '.jpg'
                image.save(fileName, skia.kJPEG)
                if showImage:
                    pil_im = Image.open(fileName)
                    display(pil_im)
            elif file_format == 'PDF':
                # NOTE(review): this branch writes a .png file; the PDF
                # conversion below is commented out, so no PDF is ever
                # produced -- looks like an unfinished copy-paste.
                fileName = fileName + '.png'
                image.save(fileName, skia.kPNG)
                if showImage:
                    pil_im = Image.open(fileName)
                    display(pil_im)
                #pil_im.show()
                # imagepdf = pil_im.convert('RGB')
                # imagepdf.save(fileNamepdf)
    return image.toarray()
def test_module(client: Client) -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed to
    and the connection to the service is successful.
    Raises exceptions if something goes wrong.

    :type client: ``Client``
    :param Client: RubrikPolaris client to use

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    params = demisto.params()
    fetch_limit = params.get('max_fetch')
    first_fetch = params.get('first_fetch')
    if fetch_limit:
        try:
            fetch_limit = int(fetch_limit)
        except ValueError:
            return "The 'Fetch Limit' is not a valid integer. The default value is 50 with a maximum of 200."
        if fetch_limit > 200:
            return "The 'Fetch Limit' can not be greater than 200."
    if first_fetch:
        try:
            if dateparser.parse(first_fetch, [DATE_TIME_FORMAT]) is None:
                raise ValueError
        except ValueError:
            return "We were unable to parse the First Fetch variable. Make sure the provided value follows the " \
                   "Relative Dates outlined at https://dateparser.readthedocs.io/en/latest/#relative-dates"
    try:
        client.get_api_token()
    except DemistoException as e:
        message = str(e)
        if 'Verify that the server URL parameter' in message:
            return "We were unable to connect to the provided Polaris Account. Verify it has been entered correctly."
        if 'Unauthorized' in message:
            return "Incorrect email address or password."
        raise e
    return "ok"
import logging
def cnn_v0(state,
           num_actions,
           scope,
           channels=32,
           activation_fn=None,
           is_training=True,
           reuse=False,
           use_timestep=True):
  """CNN architecture for discrete-output DQN.
  Args:
    state: 2-Tuple of image and timestep tensors: (image, timestep).
    num_actions: (int) Number of discrete actions.
    scope: String name of the TF variable scope.
    channels: Number of channels in each layer.
    activation_fn: Python function specifying activation of final layer. Can be
      used to implement action clipping for DDPG Actors.
    is_training: Whether this graph is for training or inference.
    reuse: Whether or not to reuse variables from this variable scope.
    use_timestep: If True, incorporate timestep into model prediction.
  Returns:
    Tensor of size (batch_size, num_actions) representing Q(s, a) for each
    action.
  """
  if use_timestep:
    net, timestep = state
  else:
    net = state
  # end_points is returned empty; kept for interface parity with other models.
  end_points = {}
  with tf.variable_scope(scope, reuse=reuse, use_resource=True):
    with slim.arg_scope(tf_modules.argscope(is_training=is_training)):
      # Three stacked 3x3 conv layers with `channels` filters each.
      for layer_index in range(3):
        net = slim.conv2d(net, channels, kernel_size=3)
        logging.info('conv%d %s', layer_index, net.get_shape())
      if use_timestep:
        # Broadcast the per-example timestep across the spatial grid and
        # append it as one extra feature channel.
        _, height, width, _ = net.get_shape().as_list()
        timestep = tf.cast(timestep, tf.float32)
        timestep = tf.tile(tf.reshape(timestep, [-1, 1, 1, 1]),
                           [1, height, width, 1])
        net = tf.concat([net, timestep], axis=3)
      # Two hidden FC layers, then a linear head of size num_actions
      # (no normalization/regularization on the output layer).
      net = tf.layers.flatten(net)
      net = slim.stack(net, slim.fully_connected, [channels, channels])
      net = slim.fully_connected(net,
                                 num_outputs=num_actions,
                                 normalizer_fn=None,
                                 weights_regularizer=None,
                                 activation_fn=activation_fn)
  return net, end_points
def get_collections(expand=False, as_dataframe=False):
    """Get available collections.

    Collections are folders on the local disk that contain downloaded or
    created data along with associated metadata.

    Args:
        expand (bool, Optional, Default=False):
            include collection details and format as dict
        as_dataframe (bool, Optional, Default=False):
            include collection details and format as pandas dataframe

    Returns:
        collections (list, dict, or pandas dataframe, Default=list):
            all available collections
    """
    collections = _load_collections()
    # as_dataframe wins over expand, matching the original precedence.
    if as_dataframe:
        return pd.DataFrame.from_dict(collections, orient='index')
    if expand:
        return collections
    return list(collections.keys())
def jsonable_safe(obj):
    """Convert *obj* to JSON-able, if possible.

    Based on fastapi.encoders.jsonable_encoder.  Falls back to
    returning *obj* unchanged when it cannot be encoded.
    """
    try:
        return jsonable_encoder(obj, exclude_none=True)
    except Exception:
        # Narrowed from a bare ``except:``, which would also swallow
        # SystemExit and KeyboardInterrupt.
        return obj
def classify_segment(data: list, no_of_outliers: int, acceptable_outlier_percent: float = .34) -> object:
    """
    Classify the outlier quality of the current window.

    :param data: A list of Datapoints -- the current window
    :param no_of_outliers: The number of datapoints in the window assigned as outliers
    :param acceptable_outlier_percent: The acceptable outlier fraction (default 0.34)
    :return: Quality.UNACCEPTABLE when outliers exceed the threshold,
        Quality.ACCEPTABLE otherwise
    """
    threshold = int(acceptable_outlier_percent * len(data))
    if no_of_outliers > threshold:
        return Quality.UNACCEPTABLE
    return Quality.ACCEPTABLE
def nearest_neighbor(v, candidates, k=1):
    """
    Find the top-k nearest neighbors of *v* by cosine similarity.

    Input:
      - v, the vector you are going find the nearest neighbor for
      - candidates: a set of vectors where we will find the neighbors
      - k: top k nearest neighbors to find
    Output:
      - k_idx: the indices of the top k closest vectors in sorted form
    """
    similarities = [cosine_similarity(v, candidate) for candidate in candidates]
    ranked_indices = np.argsort(similarities)
    return ranked_indices[-k:]
def reverse(x):
    """
    Reverse the decimal digits of *x*, preserving sign.

    Returns 0 when the reversed magnitude would reach 2**31
    (32-bit signed overflow guard).

    :type x: int
    :rtype: int
    """
    sign = -1 if x < 0 else 1
    magnitude = int(str(abs(x))[::-1])
    if magnitude >= 2 ** 31:
        return 0
    return sign * magnitude
def create_request(
        service: str,
        request: str,
        settings: list = None,
        ovrds: list = None,
        append: dict = None,
        **kwargs,
    ) -> blpapi.request.Request:
    """
    Create a Bloomberg request for a query.

    Args:
        service: service name
        request: request name
        settings: list of (setting, value) pairs applied via ``req.set``
        ovrds: list of (field, value) overrides
        append: info to be appended to request directly
        kwargs: other overrides

    Returns:
        Bloomberg request
    """
    srv = conn.bbg_service(service=service, **kwargs)
    req = srv.createRequest(request)
    for setting, value in (settings or []):
        req.set(setting, value)
    if ovrds:
        overrides = req.getElement('overrides')
        for fld, val in ovrds:
            entry = overrides.appendElement()
            entry.setElement('fieldId', fld)
            entry.setElement('value', val)
    if append:
        for key, val in append.items():
            # A bare string counts as a single value, not an iterable.
            values = [val] if isinstance(val, str) else val
            for v in values:
                req.append(key, v)
    return req
def delete_byID(iid):
    """ Delete the row with id *iid* from tbl_inc_exp.

    Uses the module-level connection/cursor.  Returns True on success,
    False on any failure (non-numeric id, SQL error, ...).
    """
    global conn, curs
    try:
        # Cast to int so a malicious or malformed id cannot inject SQL
        # into the interpolated statement.
        sql = "DELETE FROM tbl_inc_exp WHERE id={}".format(int(iid))
        curs.execute(sql)
        conn.commit()
        return True
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit.
        return False
import tempfile
import os
def _get_required_checks_and_statuses(pr, cfg):
    """Return a lower-cased list of required CI statuses and checks for *pr*.

    Clones the PR head into a temporary directory and inspects which CI
    config files exist; "linter" is always required.  Entries whose name
    appears as a substring of any ``bot.automerge_options.ignored_statuses``
    config entry are dropped.  *pr* is presumably a PyGithub pull-request
    object (only ``pr.head`` is used) -- TODO confirm.
    """
    ignored_statuses = cfg.get(
        'bot', {}).get(
            'automerge_options', {}).get(
                'ignored_statuses', [])
    required = ["linter"]
    with tempfile.TemporaryDirectory() as tmpdir:
        _run_git_command("clone", pr.head.repo.clone_url, tmpdir)
        with pushd(tmpdir):
            _run_git_command("checkout", pr.head.sha)
            # Each CI provider is required only if its config file exists
            # in the checked-out head.
            if os.path.exists("appveyor.yml") or os.path.exists(".appveyor.yml"):
                required.append("appveyor")
            if os.path.exists(".drone.yml"):
                required.append("drone")
            if os.path.exists(".travis.yml"):
                required.append("travis")
            if os.path.exists("azure-pipelines.yml"):
                required.append("azure")
            # smithy writes this config even if circle is off, but we can check
            # for other things
            if (
                os.path.exists(".circleci/config.yml")
                and _circle_is_active()
            ):
                required.append("circle")
    return [
        r.lower()
        for r in required
        if not any(r.lower() in _i for _i in ignored_statuses)
    ]
def get_props(adapter=None,
              device=None,
              service=None,
              characteristic=None,
              descriptor=None):
    """
    Get the D-Bus properties interface for the specified object.

    :param adapter: Adapter Address
    :param device: Device Address
    :param service: GATT Service UUID
    :param characteristic: GATT Characteristic UUID
    :param descriptor: GATT Descriptor UUID
    :return: Object of the DBus properties available
    """
    object_path = get_dbus_path(adapter, device, service,
                                characteristic, descriptor)
    dbus_object = get_dbus_obj(object_path)
    return get_dbus_iface(dbus.PROPERTIES_IFACE, dbus_object)
def table_entry_pretty_print(entry, indent, line_wrap=-1):
###############################################################################
    """Create and return a pretty print string of the contents of <entry>.

    *entry* is an XML element (ElementTree-style: .tag, .attrib, .text,
    iterable children).  *indent* is the starting depth; *line_wrap* is
    passed through to ``_format_line`` (presumably a max line width,
    -1 meaning no wrap -- TODO confirm).  Recurses over children.
    """
    output = ""
    outline = "<{}".format(entry.tag)
    for name in entry.attrib:
        outline += " {}={}".format(name, entry.attrib[name])
    # end for
    has_children = len(list(entry)) > 0
    has_text = entry.text
    if has_children or has_text:
        # We have sub-structure, close and print this tag
        outline += ">"
        output += _format_line(outline, indent, line_wrap)
    else:
        # No sub-structure, we are done with this tag
        outline += " />"
        output += _format_line(outline, indent, line_wrap)
    # end if
    if has_children:
        # Recurse one level deeper for each child element.
        for child in entry:
            output += table_entry_pretty_print(child, indent+1,
                                               line_wrap=line_wrap)
        # end for
    # end if
    if has_text:
        output += _format_line(entry.text, indent+1, line_wrap)
    # end if
    if has_children or has_text:
        # We had sub-structure, print the close tag
        outline = "</{}>".format(entry.tag)
        # rstrip drops trailing whitespace before appending the closing tag
        # on its own line.
        output = output.rstrip() + '\n' + _format_line(outline,
                                                       indent, line_wrap)
    # end if
    return output
def part_1(input_data: list[int]) -> int:
    """Count the number of times a depth measurement increases from the previous measurement.

    Args:
        input_data (list[int]): depth measurements
            (docstring previously claimed ``str``, contradicting the annotation)

    Returns:
        int: number of depth increases
    """
    # Pair each depth with its successor; avoids the manual-loop version's
    # index-0 special case and its unused `enumerate` value.
    return sum(1 for prev, cur in zip(input_data, input_data[1:]) if cur > prev)
from typing import Optional
def find_base_split_commit(split_dir, base_commit) -> Optional[str]:
    """ Return the hash of the base commit in the specified split
    repository derived from the specified monorepo base commit,
    or None when no such commit exists. """
    mono_base = git_output('rev-list', '--first-parent', '-n', '1', '--grep',
                           f'^apple-llvm-split-dir: {split_dir}/*$', base_commit,
                           ignore_error=True)
    if not mono_base:
        return None
    trailer = 'apple-llvm-split-commit:'
    # Scan the commit message for the split-commit trailer.
    for line in git_output('rev-list', '-n', '1', '--format=%B',
                           mono_base).splitlines():
        if line.startswith(trailer):
            return line[len(trailer):].strip()
    return None
def evaluate(roughness, eta, wo, wi, dist):
    # return brdf value (didn't multiply cos)
    """Evaluate the Walter rough-dielectric BSDF and its sampling PDFs.

    Handles both reflection and refraction through a rough dielectric
    interface, with either a GGX or a Beckmann microfacet distribution.

    Args:
        roughness: microfacet roughness alpha (scalar or broadcastable array).
        eta: refractive index of the side of the surface facing away from
            the normal (assumed > 1).
        wo: outgoing ("light") directions; last axis is xyz, z = cos(normal).
        wi: incoming ("viewer") directions; same layout as wo.
        dist: 'G' for GGX or 'B' for Beckmann.

    Returns:
        tuple: (value, fpdf, rpdf) -- BSDF value (RGB in the last axis,
        cosine factor NOT folded in), forward PDF and reverse PDF.

    Raises:
        ValueError: if `dist` is neither 'G' nor 'B'.

    NOTE(review): assumes wo/wi are unit vectors in a frame whose z axis is
    the shading normal, and that `nax` is np.newaxis -- confirm with callers.
    """
    # eta is assumed > 1, and it is the refractive index of the side of the
    # surface facing away from the normal.
    rGain = 1.0
    boostReflect = 1.0
    tAlbedo = np.array((1.0, 1.0, 1.0))
    # Convention is for forward path tracing; "i" is the viewer and "o" is the light.
    VdotN = wi[...,2]
    LdotN = wo[...,2]
    # Opposite signs against the normal => the path crosses the interface.
    isRefraction = ( LdotN*VdotN < 0.0 )
    # Refractive indices for the two sides. eta_o is the index for the side
    # opposite wi, even when wo is on the same side.
    eta_i = np.where(VdotN > 0, 1.0, eta)
    eta_o = np.where(VdotN > 0, eta, 1.0)
    # Half vector (generalized for refraction: Walter eq 16).
    H = np.where(isRefraction[nax],
                 -(eta_o[nax] * wo + eta_i[nax] * wi),
                 SIGN(VdotN)[nax] * (wo + wi))
    H = Normalize( H )
    # check side for H (computed but currently unused below)
    correct = H[...,2] * wi[...,2] > 0
    VdotH = Dot( wi, H )
    absVdotH = ABS( VdotH )
    LdotH = Dot( wo, H )
    absLdotH = ABS( LdotH )
    # This seems always to compute Fresnel factor for the ray coming from outside.
    #F = Fresnel( absVdotH, eta )
    # Compute fresnel factor for the appropriate side of the surface.
    F = fresnel(eta_i, eta_o, absVdotH)
    # Normalized lobe-selection probabilities (reflect vs refract).
    chooseReflect = F * rGain * boostReflect
    chooseRefract = (1.0-F) * Max( tAlbedo )
    total = chooseRefract + chooseReflect
    chooseReflect = chooseReflect / total
    chooseRefract = 1.0 - chooseReflect
    roughness2 = roughness*roughness
    HdotN = H[...,2]
    costheta = ABS( HdotN )
    costheta2 = costheta * costheta
    if dist == 'G':
        # Compute the microfacet distribution (GGX): eq 33
        alpha2_tantheta2 = roughness2 + ( 1.0 - costheta2 ) / costheta2
        D = chi_plus(HdotN) * roughness2 / np.pi / ( costheta2*costheta2 * alpha2_tantheta2*alpha2_tantheta2 )
        # Compute the Smith shadowing terms: eq 34 and eq 23
        LdotN2 = LdotN * LdotN
        VdotN2 = VdotN * VdotN
        iG1o = 1.0 + SQRT( 1.0 + roughness2 * ( 1.0 - LdotN2 ) / LdotN2 )
        iG1i = 1.0 + SQRT( 1.0 + roughness2 * ( 1.0 - VdotN2 ) / VdotN2 )
        G = chi_plus(VdotH/VdotN) * chi_plus(LdotH/LdotN) * 4.0 / ( iG1o * iG1i )
    elif dist == 'B':
        # Beckmann distribution and shadowing masking term
        # Compute the Beckmann Distribution: eq 25
        tantheta2 = ( 1.0 - costheta2 ) / costheta2
        D = chi_plus(HdotN)/(np.pi * roughness2 * costheta2 * costheta2) * np.exp(-tantheta2/roughness2);
        # Shadowing masking term for Beckmann: eq 27
        # (rational approximation used when a < 1.6, exact form otherwise)
        costhetav = ABS(VdotN)
        tanthetav = SQRT(1 - costhetav*costhetav)/costhetav
        a = 1.0/(roughness * tanthetav)
        iG1i = np.where(a<1.6, BeckmannG1(VdotH/VdotN, a), BeckmannG2(VdotH/VdotN))
        costhetal = ABS(LdotN)
        tanthetal = SQRT(1 - costhetal*costhetal)/costhetal
        a = 1.0/(roughness * tanthetal)
        iG1o = np.where(a<1.6, BeckmannG1(LdotH/LdotN, a), BeckmannG2(LdotH/LdotN))
        G = iG1i * iG1o
    else:
        raise ValueError('dist is neither G nor B')
    # Final BRDF value and PDF: eq 41
    # Refraction case
    denom = ( VdotH + (eta_o/eta_i) * LdotH)**2
    idenom = 1.0 / denom
    fJacobian = absLdotH * idenom
    rJacobian = absVdotH * idenom
    # refract_value = tAlbedo * ( (1.0-F) * D * G * absVdotH * fJacobian * (eta_o/eta_i)**2 / ABS( VdotN ) )[...,np.newaxis] # baking LdotN
    refract_value = tAlbedo * ( (1.0-F) * D * G * absVdotH * fJacobian * (eta_o/eta_i)**2 / (ABS( VdotN ) *ABS( LdotN )))[...,np.newaxis] # not baking LdotN
    refract_fpdf = chi_plus(VdotH/VdotN) * chi_plus(LdotH/LdotN) * chooseRefract * D*costheta * fJacobian * (eta_o/eta_i)**2
    refract_rpdf = chi_plus(VdotH/VdotN) * chi_plus(LdotH/LdotN) * chooseRefract * D*costheta * rJacobian
    # Reflection case
    jacobian = 1.0 / ( 4.0 * absLdotH ) # LdotH = VdotH by definition
    # reflect_value = makeRtColorRGB( rGain * F * D * G / ( 4.0 * ABS( VdotN ) ) ) # baking LdotN
    reflect_value = makeRtColorRGB( rGain * F * D * G / ( 4.0 * ABS( VdotN ) * ABS( LdotN )) ) # no baking LdotN
    reflect_fpdf = chi_plus(VdotH/VdotN) * chi_plus(LdotH/LdotN) * chooseReflect * D*costheta * jacobian
    reflect_rpdf = chi_plus(VdotH/VdotN) * chi_plus(LdotH/LdotN) * chooseReflect * D*costheta * jacobian
    # Select per-sample between the refraction and reflection results.
    value = np.where(isRefraction[...,np.newaxis], refract_value, reflect_value)
    fpdf = np.where(isRefraction, refract_fpdf, reflect_fpdf)
    rpdf = np.where(isRefraction, refract_rpdf, reflect_rpdf)
    return (value, fpdf, rpdf )
def round_floats(number):
    """Round a comparison score to at most two decimal places.

    No meaningful precision is lost: the intent is to normalise values such
    as 1.7499999999 into 1.75.

    Args:
        number (float): the value of a comparison score

    Returns:
        float: the score rounded to two decimal places (ties resolved with
        the Decimal default, banker's rounding)
    """
    # The original code called getcontext() and discarded the result -- a
    # no-op, removed. Quantizing via Decimal avoids binary-float artefacts.
    return float(Decimal(number).quantize(Decimal('0.01')))
async def async_create_entities(hass, config):
    """Create the template binary sensors."""
    entities = []
    for object_id, cfg in config[CONF_SENSORS].items():
        # Each sensor config maps directly onto a BinarySensorTemplate;
        # optional keys fall back to None (or {} for attribute templates).
        entities.append(
            BinarySensorTemplate(
                hass,
                object_id,
                cfg.get(ATTR_FRIENDLY_NAME, object_id),
                cfg.get(CONF_DEVICE_CLASS),
                cfg[CONF_VALUE_TEMPLATE],
                cfg.get(CONF_ICON_TEMPLATE),
                cfg.get(CONF_ENTITY_PICTURE_TEMPLATE),
                cfg.get(CONF_AVAILABILITY_TEMPLATE),
                cfg.get(CONF_DELAY_ON),
                cfg.get(CONF_DELAY_OFF),
                cfg.get(CONF_ATTRIBUTE_TEMPLATES, {}),
                cfg.get(CONF_UNIQUE_ID),
            )
        )
    return entities
import aiohttp
async def fetch_user(bearer: str) -> dict:
    """Fetch information about a user from their bearer token."""
    auth_header = {"Authorization": f"Bearer {bearer}"}
    async with aiohttp.ClientSession(headers=auth_header, raise_for_status=True) as session:
        response = await session.get(f"{API_BASE}/users/@me")
        return await response.json()
import importlib
import os
def select_model(model_path):
    """Import the model module located at ``model_path`` and return it.

    The module's ``EXP_NAME`` attribute is set to the module's base name.

    BUG FIX: the previous implementation used
    ``exec("from ... import ... as model")``, which in Python 3 cannot
    create a local variable visible after the ``exec`` call, so the
    subsequent ``model.EXP_NAME`` access raised ``NameError``.
    ``importlib.import_module`` performs the same import reliably.
    """
    model_str = os.path.basename(model_path)
    model_str = model_str.split('.py')[0]
    import_root = ".".join(model_path.split(os.path.sep)[:-1])
    dotted_name = f"{import_root}.{model_str}" if import_root else model_str
    model = importlib.import_module(dotted_name)
    # Record the experiment name on the module, as callers expect.
    model.EXP_NAME = model_str
    return model
def single_varint(data, index=0):
    """
    Decode a single Varint from ``data`` starting at ``index``.

    Only one- and two-byte Varints are supported; a ValueError is raised
    when a third continuation byte would be required.

    :param data: The data containing the Varint.
    :param index: The current index within the data.
    :return: (varint, index) -- the decoded value and the index just past
        the consumed bytes.
    """
    first = ord(data[index:index + 1])
    # High bit clear: single-byte Varint.
    if first < 128:
        return first, index + 1
    second = ord(data[index + 1:index + 2])
    # A set high bit on the second byte would mean a 3+ byte Varint,
    # which this decoder does not handle.
    if second >= 128:
        raise ValueError
    return (first - 128) * 128 + second, index + 2
def dict_from_corpus(corpus):
    """
    Scan `corpus` for all word ids that appear in it, then construct and
    return a mapping of each `wordId -> str(wordId)`.

    Used whenever *words* need to be displayed (as opposed to just their
    ids) but no wordId->word mapping was provided. The resulting mapping
    only covers words actually used in the corpus, up to the highest
    wordId found.
    """
    return FakeDict(get_max_id(corpus) + 1)
import os
def read_fastspecfit(fastfitfile, fastphot=False, rows=None, columns=None):
    """Read the fitting results.

    Returns (fastfit, meta, specprod, coadd_type), or (None, None, None)
    when the file does not exist.
    """
    if not os.path.isfile(fastfitfile):
        log.warning('File {} not found.'.format(fastfitfile))
        return None, None, None
    # FASTPHOT holds photometry-only fits, FASTSPEC the spectroscopic ones.
    ext = 'FASTPHOT' if fastphot else 'FASTSPEC'
    hdr = fitsio.read_header(fastfitfile, ext='METADATA')
    specprod = hdr['SPECPROD']
    coadd_type = hdr['COADDTYP']
    fastfit = Table(fitsio.read(fastfitfile, ext=ext, rows=rows, columns=columns))
    meta = Table(fitsio.read(fastfitfile, ext='METADATA', rows=rows, columns=columns))
    log.info('Read {} object(s) from {} and specprod={}'.format(len(fastfit), fastfitfile, specprod))
    return fastfit, meta, specprod, coadd_type
def mat2dict(matobj):
    """
    Recursively convert a scipy MATLAB ``mat_struct`` object into nested
    plain dictionaries keyed by field name.
    """
    result = {}
    for field in matobj._fieldnames:
        value = matobj.__dict__[field]
        result[field] = (
            mat2dict(value)
            if isinstance(value, scio.matlab.mio5_params.mat_struct)
            else value
        )
    return result
import random
def build_stop_sign():
    """
    Build a 'Stop Clicking !!!' GLabel at a random on-screen position.

    :return: object, sign
    """
    pos_x = random.randint(0, 640)
    pos_y = random.randint(30, 500)
    label = GLabel('Stop Clicking !!! ', x=pos_x, y=pos_y)
    label.color = 'firebrick'
    label.font = 'Times New Roman-15-bold'
    return label
def name_atom(atom):
    """Return the atom symbol to use in a depiction ('' suppresses the label).

    Carbon atoms whose valence is satisfied get no label; aromatic
    pyrrole-like nitrogens render as "nH"; otherwise the label is
    weight + symbol + hydrogen count + charge.

    Raises:
        ValueError: if the computed hydrogen count is negative.
    """
    symbol = "%s"%(atom.symbol,)
    weight = atom.weight
    charge = atom.charge
    hcount = atom.hcount
    explicit_hcount = atom.explicit_hcount
    out_symbol = symbol
    # pyrole like nitrogens
    if atom.aromatic and symbol == "N" and charge == 0 and \
       weight == 0 and explicit_hcount == 1 and hcount == 0:
        # XXX Fix Me
        # There should only be one of these per five membered
        # aromatic ring
        return "nH"
    if symbol in ORGANIC_SUBSET and atom.valences and \
       not weight and charge == 0 and len(atom.bonds) > 1:
        # Renamed from `sum`, which shadowed the builtin.
        total_valence = int(atom.hcount + atom.sumBondOrders())
        for valence in atom.valences:
            if valence == total_valence:
                # Valence satisfied: carbons get no label, others the bare symbol.
                if symbol == "C":
                    return ""
                else:
                    return symbol
    if not weight: weight = ""
    if charge == -1: charge = "-"
    elif charge == 1: charge = "+"
    elif charge > 1: charge = "+%s"%charge
    else: charge = ""
    hcount += explicit_hcount
    if hcount == 1: hcount = "H"
    elif hcount == 0: hcount = ""
    elif hcount > 1: hcount = "H%s"%hcount
    else:
        # BUG FIX: raising a plain string is invalid in Python 3 (it raised
        # TypeError instead of the intended message); raise a real exception.
        raise ValueError("Negative hcount!!!")
    output = "%s%s%s%s"%(weight, out_symbol, hcount, charge)
    return output
import os
import tempfile
import getpass
def preview_convert(file_in, command, size, gm_or_im):
    """
    Generate a downscaled preview of an image via Image/GraphicsMagick.

    file_in - fullname image file
    command - additional converter arguments (or a single space for none)
    size - maximum preview edge length in pixels
    gm_or_im - converter prefix selecting GraphicsMagick or ImageMagick
    --
    return: dict with keys 'filename' (preview file path), 'size'
    (humanized source file size), 'width' and 'height' (source
    dimensions), or None when preview generation fails.
    """
    try:
        image_size = magick.get_image_size(file_in, gm_or_im)
        width = str(image_size[0])
        height = str(image_size[1])
        filesize = common.humansize(os.path.getsize(file_in))
        cmd_magick = gm_or_im + "convert"
        command = " -resize " + str(size) + "x" + str(size) + command
        # Per-user temp file so concurrent users don't clobber each other.
        file_preview = os.path.join(tempfile.gettempdir(),
                                    "fotokilof_" + getpass.getuser()
                                    + "_preview.ppm")
        magick.magick(command, file_in, file_preview, cmd_magick)
        result = {'filename': file_preview, 'size': filesize,
                  'width': width, 'height': height}
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Keep the best-effort behaviour but only for
        # real errors.
        log.write_log("Error in preview_convert: return", "E")
        result = None
    return result
import math
def rot3D(x, r):
    """Rotate 3D points by extrinsic Euler angles (X, then Y, then Z).

    Args:
        x (np.array): points with 3 rows, shape (3,) or (3, N)
        r (sequence of float): rotation angles in radians about the
            x, y and z axes respectively (the previous docstring wrongly
            described ``r`` as a single float)

    Returns:
        np.array: the rotated points, ``R @ x`` with ``R = Rz @ Ry @ Rx``
    """
    cx, sx = math.cos(r[0]), math.sin(r[0])
    cy, sy = math.cos(r[1]), math.sin(r[1])
    cz, sz = math.cos(r[2]), math.sin(r[2])
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    # Matmul is left-associative, so this equals (Rz @ Ry @ Rx) @ x.
    return Rz @ Ry @ Rx @ x
def perform_move(move: Move, attacking_creature: BattleCreature, defending_creature: BattleCreature, static_game_data: StaticGameData):
    """
    Performs the move by the attacking creature on the defending_creature.
    This can change both the attacking creature, the defending creature and
    cause messages to be returned.

    Side effects: decrements ``move.pp`` on use, adjusts the target's HP
    and stat stages, and sets ``fainted`` when HP reaches zero.

    :param move: the move being performed.
    :param attacking_creature: the creature performing the move.
    :param defending_creature: the creature being targeted.
    :param static_game_data: lookup tables (stats, type chart).
    :return: list of battle-log message strings describing what happened.
    """
    messages = []
    if move.pp <= 0:
        messages.append(u"Not enough points to perform {0}".format(move.move_data.name))
    else:
        messages.append(u"{0} used {1}".format(attacking_creature.creature.in_battle_name(), move.move_data.name))
        move.pp -= 1
        # Check if the move misses
        if not hit_calculation(move, attacking_creature, defending_creature):
            messages.append(u"{0}'s attack missed!".format(attacking_creature.creature.in_battle_name()))
        else:
            # TODO: Missing the "specific-move" target and only considering 1v1 battles.
            # Resolve the target; 'entire-field' appears in both lists, so the
            # defender branch wins because it is evaluated second.
            target = None
            if move.move_data.target.identifier in ['user', 'users-field', 'user-or-ally', 'entire-field']:
                target = attacking_creature
            if move.move_data.target.identifier in ['selected-pokemon', 'random-opponent', 'all-other-pokemon',
                                                    'opponents-field', 'all-opponents', 'entire-field']:
                target = defending_creature
            if target:
                if move.move_data.damage_move():
                    new_messages, hp_loss = damage_calculation(move, attacking_creature, target,
                                                               static_game_data.type_chart)
                    for message in new_messages:
                        messages.append(message)
                    hp_stat = static_game_data.stat(data.HP_STAT)
                    target.creature.adjust_stat(hp_stat, hp_loss)
                    # Faint when HP drops to (or below) zero.
                    if target.stat_value(hp_stat) <= 0:
                        target.creature.fainted = True
                        messages.append(u"{0} fainted!".format(target.creature.in_battle_name()))
                if move.move_data.stat_change_move():
                    # Apply each non-zero stat-stage change and report it.
                    for stat, value in move.move_data.stat_changes.items():
                        if value != 0:
                            adjust_amount = target.adjust_stat_adjusts(stat, value)
                            messages.append(get_stat_change_message(move, target, adjust_amount, stat))
    return messages
def _shake_shake_layer(x,
                       output_filters,
                       num_blocks,
                       stride,
                       is_training):
  """Stack `num_blocks` shake-shake blocks into one full layer.

  Only the first block applies `stride`; subsequent blocks use stride 1.
  """
  for idx in range(num_blocks):
    block_stride = stride if idx == 0 else 1
    x = _shake_shake_block(x, output_filters, block_stride, is_training)
  return x
from typing import Optional
def filter_nan_values(data: DataFrame, used_cols: Optional[list[str]] = None):
    """
    Return a copy of ``data`` without rows containing NaN in the columns
    used for further calculations.

    :param data: Dataframe with full dataset
    :type data: DataFrame
    :param used_cols: Columns to check, None checks all, defaults to None
    :type used_cols: Optional[list[str]], optional
    """
    filtered = data.dropna(subset=used_cols)
    return filtered.copy()
def dropout(inputs,
            is_training,
            scope,
            keep_prob=0.5,
            noise_shape=None):
  """ Dropout layer applied only while training.

  Args:
    inputs: tensor
    is_training: boolean tf.Variable selecting train/inference behaviour
    scope: string
    keep_prob: float, probability of keeping each unit
    noise_shape: list of ints

  Returns:
    tensor variable
  """
  with tf.variable_scope(scope) as sc:
    def _train_branch():
      return tf.nn.dropout(inputs, keep_prob, noise_shape)
    def _inference_branch():
      return inputs
    outputs = tf.cond(is_training, _train_branch, _inference_branch)
    return outputs
def test_trained_model(test_data,
                       clf_name,
                       model_dir_path = '',
                       iteration_number = 0,
                       is_abnormal = False,
                       threshold_value = 0):
    """
    Evaluate a trained model (autoencoder or traditional) on test data.

    For autoencoders, the per-sample reconstruction MSE is compared
    against `threshold_value`; for traditional classifiers, the helper
    computes the ratio directly.

    :param test_data:
    :param clf_name: 'single_ae', 'deep_ae', or a traditional classifier name
    :param model_dir_path:
    :param iteration_number:
    :param is_abnormal: when True, count samples ABOVE the threshold
    :param threshold_value: MSE decision threshold (autoencoders only)
    :return: fraction of test samples classified as expected
    """
    if clf_name not in ('single_ae', 'deep_ae'):
        test_ratio, _ = test_trained_traditional_model(test_data=test_data,
                                                       clf_name=clf_name,
                                                       model_dir_path=model_dir_path,
                                                       iteration_number=iteration_number,
                                                       is_abnormal=is_abnormal)
        return test_ratio
    _, mse_per_sample = test_trained_ae_model(test_data=test_data,
                                              model_dir_path=model_dir_path,
                                              iteration_number=iteration_number)
    if is_abnormal:
        flags = [score > threshold_value for score in mse_per_sample]
    else:
        flags = [score <= threshold_value for score in mse_per_sample]
    return sum(flags) / float(len(mse_per_sample))
import glob
def tumor_list(version):
    """
    Collect sorted tumor .npy file paths for every given split.

    version: cross validation version and train or val
    """
    collected = []
    for split in version:
        collected += sorted(glob.glob(f'./data/tumor_-150_150/{split}/label_*/*.npy'))
    return collected
def roles_allowed(roles):
    """Takes a list of roles allowed to access decorated endpoint.

    Aborts with 403 status if user with unauthorized role tries to access
    this endpoint.

    :param list roles: List of roles that should have access to the endpoint.
    """
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            verify_jwt_in_request()
            # `user_id` replaces the original local `id`, which shadowed
            # the builtin of the same name.
            user_id = get_jwt_identity()
            user = User.from_id(user_id)
            if any(role in roles for role in user.roles()):
                return fn(*args, **kwargs)
            abort(403)
        return wrapper
    return decorator
from typing import List, Optional
def get_offsets(
        text: str,
        tokens: List[str],
        start: Optional[int] = 0) -> List[int]:
    """Calculate the character offset of each token within ``text``.

    Args:
        text (str): The string before tokenization.
        tokens (List[str]): The tokens, in order of appearance.
        start (Optional[int]): Offset added to every result.

    Returns:
        (List[int]): One start offset per token.
    """
    offsets: List[int] = []
    cursor = 0
    for token in tokens:
        for pos, ch in enumerate(token):
            # Skip any characters (e.g. whitespace) dropped by tokenization.
            while ch != text[cursor]:
                cursor += 1
            if pos == 0:
                offsets.append(cursor + start)
    return offsets
def get_content_ref_if_exists_and_not_remote(check):
    """
    Given an OVAL check element, examine the ``xccdf_ns:check-content-ref``.

    If it exists and it isn't remote, return it; otherwise return None.

    ..see-also:: is_content_href_remote
    """
    ref = check.find("./{%s}check-content-ref" % XCCDF11_NS)
    if ref is not None and not is_content_href_remote(ref):
        return ref
    return None
def sat_pass(sats, t_arr, index_epoch, location=None):
    """Find when a satellite passes above the horizon at a gps location.

    Calculate the :samp:`Altitude` & :samp:`Azimuth` of a
    :class:`~skyfield.sgp4lib.EarthSatellite` object from
    :func:`~embers.sat_utils.sat_ephemeris.load_tle` at
    every instant of time in :samp:`t_arr` from
    :func:`~embers.sat_utils.sat_ephemeris.epoch_time_array`.
    Determine all the times that the satellite is above the
    horizon, at a given gps :samp:`location` and returns the
    pair of indices of :samp:`t_arr` at which the satellite rose
    and set.

    .. code-block:: python

        from embers.sat_utils.sat_ephemeris import load_tle, epoch_ranges, epoch_time_array, sat_pass
        sats, epochs = load_tle('~/embers-data/TLE/21576.txt')
        epoch_range = epoch_ranges(epochs)
        index_epoch = 0     # select first time interval from epoch_range
        cadence = 10        # evaluate satellite position every 10 seconds
        t_arr, index_epoch = epoch_time_array(epoch_range, index_epoch, cadence)
        MWA = (-26.703319, 116.670815, 337.83) # gps coordinates of MWA Telescope
        passes, alt, az = sat_pass(sats, t_arr, index_epoch, location=MWA)

    :param sats: list of :class:`~skyfield.sgp4lib.EarthSatellite` objects
    :param t_arr: skyfield :class:`~skyfield.timelib.Timescale` object with array of times
    :param index_epoch: Index of :samp:`epoch_range` :class:`~int`
    :param location: The :samp:`gps` coordinates of the :samp:`location` at which satellite passes are to be computed. :samp:`location` is a :class:`~tuple` in the format (:samp:`latitude`, :samp:`longitude`, :samp:`elevation`), with :samp:`elevation` given in :samp:`meters`

    :returns:
        A :class:`~tuple` of (passes, alt, az), or None when `t_arr` is empty

        - passes: 2D array with pairs of indicies of :samp:`t_arr` corresponding to rise/set of satellite :class:`~numpy.ndarray`
        - alt: Array of :samp:`Altitudes` of sat at :samp:`t_arr` times :class:`~numpy.ndarray`
        - az: Array of :samp:`Azimuths` of sat at :samp:`t_arr` times :class:`~numpy.ndarray`

    """
    # Position where sat passes are to be determined in Lat/Lon/Elevation
    position = Topos(
        latitude=location[0], longitude=location[1], elevation_m=location[2]
    )
    # Select satellite from sats with index_epoch
    # Find position of sat at each timestep of t_arr
    satellite = sats[index_epoch]
    orbit = (satellite - position).at(t_arr)
    alt, az, _ = orbit.altaz()
    if alt.degrees.shape[0] > 0:
        # Check if sat is above the horizon (above -1 degrees), return boolean array
        above_horizon = alt.degrees >= -1
        # Indicies of rare times that sats are above the horizon
        (indicies,) = above_horizon.nonzero()
        # Boundary times at which the sat either rises or sets
        # (indices where the above/below-horizon flag flips)
        (boundaries,) = np.diff(above_horizon).nonzero()
        # If the sat is already up at the first sample, prepend that index
        # so boundaries always come in rise/set pairs.
        if above_horizon[0]:
            boundaries = [indicies[0]] + list(boundaries)
            boundaries = np.asarray(boundaries)
        # Likewise, if the sat is still up at the last sample, append it.
        if above_horizon[-1]:
            boundaries = list(boundaries) + [indicies[-1]]
            boundaries = np.asarray(boundaries)
        # Reshape into pairs rise & set indicies (the two fix-ups above
        # guarantee an even number of boundaries)
        passes = boundaries.reshape(len(boundaries) // 2, 2)
        return (passes, alt, az)
    else:
        return None
def get_logit_model(x_train: pd.DataFrame, y_train: pd.Series) -> LogisticRegression:
    """
    Train and return a logistic regression model
    """
    model = LogisticRegression(
        penalty='l2',
        solver='lbfgs',
        fit_intercept=False,
        intercept_scaling=False,
        class_weight='balanced',
    )
    # fit() returns the estimator itself, so this hands back the
    # same fitted instance.
    return model.fit(x_train, y_train)
from typing import Any, Dict, Tuple
def makearglists(args: Dict[str, Any]) -> Tuple[str, str]:
    """
    Returns the python code for argument declaration and argument passing to
    the function that does the work

    Parameters
    ----------
    args: dict
        Arg info for function returned by Wash Leg website

    Returns
    -------
    arg_declare: str
        String to paste into argument declaration of stub function
    arg_pass: str
        String to paste into backend call of stub function
    """
    declarations = []
    passthroughs = []
    for key, info in args.items():
        # Normalize the XML-schema type into a Python type name.
        pytype = info["type"].replace("s:", "").lower()
        if pytype == "string":
            pytype = "str"
        elif pytype == "boolean":
            pytype = "bool"
        # Side effect kept from the original: annotate each arg entry.
        info["python_type"] = pytype
        info["python_arg"] = snake_case(key)
        declarations.append(f'{info["python_arg"]}: {pytype}')
        passthroughs.append(f"{key}={info['python_arg']}")
    arg_declare = ", ".join(declarations)
    arg_pass = f"argdict: Dict[str,Any] = dict({', '.join(passthroughs)})"
    return arg_declare, arg_pass
def task1():
    """Task1 function of API3

    Returns:
        [str]: [Return string]
    """
    logger.info("In API3 task1 function")
    result = "task1 success!"
    return result
def _METIS_PartGraphKway(nvtxs, ncon, xadj, adjncy, vwgt, vsize,
                         adjwgt, nparts, tpwgts, ubvec, options, objval, part):
    """
    Called by `part_graph`

    Thin forwarding wrapper: the actual foreign-function handle is expected
    to be attached to this function as a ``.call`` attribute by setup code
    elsewhere -- TODO confirm. The parameters mirror the C-API
    METIS_PartGraphKway signature (CSR graph arrays, weights, partition
    count, and the output buffers ``objval``/``part``).
    """
    return _METIS_PartGraphKway.call(
        nvtxs, ncon, xadj, adjncy, vwgt, vsize, adjwgt, nparts, tpwgts, ubvec,
        options, objval, part)
def parse_ocr_result(ocr_result, drms):
    """
    Parses and extract data from the OCR document result string by
    using a DRM (Document Regexp Model) that matches this OCR string.

    Args:
        ocr_result (str): OCR result string;
        drms (dict): list of all DRMs dicts found in the DRM directory folder.

    Returns:
        (dict): the extracted data from the OCR results, e.g.:

        {
            "fields": {
                "field1": "result1",
                "field2": "result2"
            },
            "table": {
                "header": "table header",
                "all_rows": "all rows together here...",
                "rows": [
                    "row 1 result",
                    "row 2 result",
                    ...
                ],
                "footer": "table footer"
            }
        }
    """
    logger.info("Verifying DRMs that match with this OCR document string...")
    matching_drms = get_all_drms_match(ocr_result, drms)
    if not matching_drms:
        logger.warning("No DRM matches this OCR result. Returning None...")
        return {}
    # Use the first matching DRM.
    selected_drm = matching_drms[0]
    logger.info("Using the following DRM: %s", selected_drm)
    logger.info("Pre processing the OCR result according to DRM...")
    preprocessed = pre_process_result(ocr_result, selected_drm)
    logger.debug(
        "Showing pre processed OCR result...\n%s", preprocessed
    )
    logger.info("Extracting json data from the OCR pre processed result...")
    return extract_ocr_data(preprocessed, selected_drm)
def payment_insert(conn, payment_info):
    """
    Inserts a row in 'payment' table with the values passed in 'payment_info'

    Parameters:
        conn: Connection object
        payment_info: a tuple of values to insert

    Returns the new row id, or the error message string on failure.
    """
    insert_sql = (" INSERT into payment(payment_id, user_id, payment_method, payment_number)"
                  " VALUES (?, ?, ?, ?) ")
    try:
        cursor = conn.cursor()
        cursor.execute(insert_sql, payment_info)
        conn.commit()
        return cursor.lastrowid
    except Error as e:
        print(e)
        return str(e)
def plot_pareto_frontier(
    frontier: ParetoFrontierResults,
    CI_level: float = DEFAULT_CI_LEVEL,
    show_parameterization_on_hover: bool = True,
) -> AxPlotConfig:
    """Plot a Pareto frontier from a ParetoFrontierResults object.

    The secondary metric is plotted on x, the primary metric on y, with
    CI error bars and optional objective-threshold reference lines.

    Args:
        frontier (ParetoFrontierResults): The results of the Pareto frontier
            computation.
        CI_level (float, optional): The confidence level, i.e. 0.95 (95%)
        show_parameterization_on_hover (bool, optional): If True, show the
            parameterization of the points on the frontier on hover.

    Returns:
        AEPlotConfig: The resulting Plotly plot definition.
    """
    primary_means = frontier.means[frontier.primary_metric]
    primary_sems = frontier.sems[frontier.primary_metric]
    secondary_means = frontier.means[frontier.secondary_metric]
    secondary_sems = frontier.sems[frontier.secondary_metric]
    absolute_metrics = frontier.absolute_metrics
    all_metrics = frontier.means.keys()
    # Fall back to positional labels when arm names are unavailable.
    if frontier.arm_names is None:
        arm_names = [f"Parameterization {i}" for i in range(len(frontier.param_dicts))]
    else:
        arm_names = [f"Arm {name}" for name in frontier.arm_names]
    if CI_level is not None:
        # NOTE(review): the 0.5 factor halves the usual normal quantile used
        # for CI half-widths -- confirm this is intended.
        Z = 0.5 * norm.ppf(1 - (1 - CI_level) / 2)
    else:
        # Presumably _make_label and the error-bar math tolerate Z=None;
        # verify before passing CI_level=None.
        Z = None
    primary_threshold = None
    secondary_threshold = None
    if frontier.objective_thresholds is not None:
        primary_threshold = frontier.objective_thresholds.get(
            frontier.primary_metric, None
        )
        secondary_threshold = frontier.objective_thresholds.get(
            frontier.secondary_metric, None
        )
    # Build one hover label per point: arm name, every metric's CI and,
    # optionally, the full parameterization.
    labels = []
    # Metrics not listed as absolute are displayed as relative (%) values.
    rel_x = frontier.secondary_metric not in absolute_metrics
    rel_y = frontier.primary_metric not in absolute_metrics
    for i, param_dict in enumerate(frontier.param_dicts):
        label = f"<b>{arm_names[i]}</b><br>"
        for metric in all_metrics:
            metric_lab = _make_label(
                mean=frontier.means[metric][i],
                sem=frontier.sems[metric][i],
                name=metric,
                is_relative=metric not in absolute_metrics,
                Z=Z,
            )
            label += metric_lab
        parameterization = (
            _format_dict(param_dict, "Parameterization")
            if show_parameterization_on_hover
            else ""
        )
        label += parameterization
        labels.append(label)
    # Single scatter trace with symmetric CI error bars on both axes.
    traces = [
        go.Scatter(
            x=secondary_means,
            y=primary_means,
            error_x={
                "type": "data",
                "array": Z * np.array(secondary_sems),
                "thickness": 2,
                "color": rgba(COLORS.STEELBLUE.value, CI_OPACITY),
            },
            error_y={
                "type": "data",
                "array": Z * np.array(primary_sems),
                "thickness": 2,
                "color": rgba(COLORS.STEELBLUE.value, CI_OPACITY),
            },
            mode="markers",
            text=labels,
            hoverinfo="text",
        )
    ]
    # Objective-threshold reference lines (horizontal for the primary
    # metric, vertical for the secondary), when thresholds are provided.
    shapes = []
    if primary_threshold is not None:
        shapes.append(
            {
                "type": "line",
                "xref": "paper",
                "x0": 0.0,
                "x1": 1.0,
                "yref": "y",
                "y0": primary_threshold,
                "y1": primary_threshold,
                "line": {"color": rgba(COLORS.CORAL.value), "width": 3},
            }
        )
    if secondary_threshold is not None:
        shapes.append(
            {
                "type": "line",
                "yref": "paper",
                "y0": 0.0,
                "y1": 1.0,
                "xref": "x",
                "x0": secondary_threshold,
                "x1": secondary_threshold,
                "line": {"color": rgba(COLORS.CORAL.value), "width": 3},
            }
        )
    layout = go.Layout(
        title="Pareto Frontier",
        xaxis={
            "title": frontier.secondary_metric,
            "ticksuffix": "%" if rel_x else "",
            "zeroline": True,
        },
        yaxis={
            "title": frontier.primary_metric,
            "ticksuffix": "%" if rel_y else "",
            "zeroline": True,
        },
        hovermode="closest",
        legend={"orientation": "h"},
        width=750,
        height=500,
        margin=go.layout.Margin(pad=4, l=225, b=75, t=75),  # noqa E741
        shapes=shapes,
    )
    fig = go.Figure(data=traces, layout=layout)
    return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def filenames(
    directory, file_stem, file_ext=DEFAULT_FILE_EXT, stamp_regex=DEFAULT_STAMP_REGEX
):
    """Generate all filenames with matching stem.

    Parameters
    ----------
    directory : pathlib.Path
        Path to directory holding file
    file_stem : str
        File stem, filename without timestamp and extension

    Returns
    -------
    generator of str
        Name of each file in directory matching the stem
    """
    matching_paths = filepaths(
        directory, file_stem, file_ext=file_ext, stamp_regex=stamp_regex
    )
    return (path.name for path in matching_paths)
from typing import Optional
def add_vqsr_eval_jobs(
    b: hb.Batch,
    dataproc_cluster: dataproc.DataprocCluster,
    combined_mt_path: str,
    rf_annotations_ht_path: str,
    info_split_ht_path: str,
    final_gathered_vcf_path: str,
    rf_result_ht_path: Optional[str],
    fam_stats_ht_path: Optional[str],
    freq_ht_path: str,
    work_bucket: str,
    analysis_bucket: str,  # pylint: disable=unused-argument
    overwrite: bool,
    vqsr_vcf_job: Job,
    rf_anno_job: Job,
    output_ht_path: str,
) -> Job:
    """
    Make jobs that do evaluation VQSR model and applies the final filters

    Three chained Dataproc stages: (1) load the VQSR'ed VCF into a
    split-multiallelic filters Hail Table, (2) compute score-bin evaluation
    tables, (3) apply the final filter. Each stage is skipped (a no-op
    "[reuse]" job) when its outputs already exist and `overwrite` is False.

    Returns the final_filter Job object and the path to the final filter HT
    """
    # Stage 1: import the VQSR'ed VCF into a filters Hail Table.
    job_name = 'AS-VQSR: load_vqsr'
    vqsr_filters_split_ht_path = join(work_bucket, 'vqsr-filters-split.ht')
    if overwrite or not utils.file_exists(vqsr_filters_split_ht_path):
        load_vqsr_job = dataproc_cluster.add_job(
            f'{utils.SCRIPTS_DIR}/load_vqsr.py --overwrite '
            f'--split-multiallelic '
            f'--out-path {vqsr_filters_split_ht_path} '
            f'--vqsr-vcf-path {final_gathered_vcf_path} '
            f'--bucket {work_bucket} ',
            job_name=job_name,
        )
        load_vqsr_job.depends_on(vqsr_vcf_job)
    else:
        load_vqsr_job = b.new_job(f'{job_name} [reuse]')
    # Stage 2: evaluate the VQSR model (score-bin and aggregated tables).
    job_name = 'AS-VQSR: evaluation'
    score_bin_ht_path = join(work_bucket, 'vqsr-score-bin.ht')
    score_bin_agg_ht_path = join(work_bucket, 'vqsr-score-agg-bin.ht')
    if (
        overwrite
        or not utils.file_exists(score_bin_ht_path)
        or not utils.file_exists(score_bin_agg_ht_path)
    ):
        # Optional inputs (fam stats, RF results) only add their flags
        # when the corresponding paths were provided.
        eval_job = dataproc_cluster.add_job(
            f'{utils.SCRIPTS_DIR}/evaluation.py --overwrite '
            f'--mt {combined_mt_path} '
            f'--rf-annotations-ht {rf_annotations_ht_path} '
            f'--info-split-ht {info_split_ht_path} '
            + (f'--fam-stats-ht {fam_stats_ht_path} ' if fam_stats_ht_path else '')
            + (
                f'--rf-result-ht {rf_result_ht_path} '
                if (rf_annotations_ht_path and rf_result_ht_path)
                else ''
            )
            + f'--vqsr-filters-split-ht {vqsr_filters_split_ht_path} '
            f'--bucket {work_bucket} '
            f'--out-bin-ht {score_bin_ht_path} '
            f'--out-aggregated-bin-ht {score_bin_agg_ht_path} '
            f'--run-sanity-checks ',
            job_name=job_name,
        )
        eval_job.depends_on(load_vqsr_job, rf_anno_job)
    else:
        eval_job = b.new_job(f'{job_name} [reuse]')
    # Stage 3: apply the final filters and write the output HT.
    job_name = 'AS-VQSR: final filter'
    vqsr_model_id = 'vqsr_model'
    if not utils.file_exists(output_ht_path):
        final_filter_job = dataproc_cluster.add_job(
            f'{utils.SCRIPTS_DIR}/final_filter.py --overwrite '
            f'--out-final-filter-ht {output_ht_path} '
            f'--vqsr-filters-split-ht {vqsr_filters_split_ht_path} '
            f'--model-id {vqsr_model_id} '
            f'--model-name VQSR '
            f'--score-name AS_VQSLOD '
            f'--info-split-ht {info_split_ht_path} '
            f'--freq-ht {freq_ht_path} '
            f'--score-bin-ht {score_bin_ht_path} '
            f'--score-bin-agg-ht {score_bin_agg_ht_path} '
            f'--bucket {work_bucket} ',
            job_name=job_name,
        )
        final_filter_job.depends_on(eval_job)
    else:
        final_filter_job = b.new_job(f'{job_name} [reuse]')
    return final_filter_job
import os
def is_readable(path):
    """
    Check whether a file or a directory can be read.

    The test actually performs an operation that requires read access:
    listing for directories, opening for files.
    """
    try:
        if os.path.isdir(path):
            os.listdir(path)
        else:
            with _open(path, 'r') as fd:
                pass
    except (OSError, IOError):
        return False
    return True
def interpolate(r, g, b):
    """ Interpolate missing values in the bayer pattern
    by using bilinear interpolation

    Args:
        red, green, blue color channels as numpy array (H,W)
    Returns:
        Interpolated image as numpy array (H,W,3)
    """
    # Green samples cover half the Bayer grid; each missing green pixel
    # averages its four green neighbours (up/down/left/right).
    green_kernel = np.array([
        [0.0, 0.25, 0.0],
        [0.25, 1.0, 0.25],
        [0.0, 0.25, 0.0],
    ])
    # Red/blue each cover a quarter of the grid; missing values come from
    # the edge (1/2) and corner (1/4) neighbours of the 3x3 window.
    red_blue_kernel = np.array([
        [0.25, 0.5, 0.25],
        [0.5, 1.0, 0.5],
        [0.25, 0.5, 0.25],
    ])
    r_full = convolve(r, red_blue_kernel, mode="mirror")
    g_full = convolve(g, green_kernel, mode="mirror")
    b_full = convolve(b, red_blue_kernel, mode="mirror")
    return assembleimage(r_full, g_full, b_full)
def heatmap(pois, sample_size=-1, kwd=None, tiles='OpenStreetMap', width='100%', height='100%', radius=10):
    """Render the input POIs as a heatmap on a Folium map.

    Args:
        pois (GeoDataFrame): A POIs GeoDataFrame.
        sample_size (int): Number of POIs to sample; -1 shows all.
        kwd (string): Optional keyword to filter the POIs by.
        tiles (string): The tiles to use for the map (default: `OpenStreetMap`).
        width (integer or percentage): Map width in pixels or percentage (default: 100%).
        height (integer or percentage): Map height in pixels or percentage (default: 100%).
        radius (float): Radius of each point of the heatmap (default: 10).

    Returns:
        A Folium Map object displaying the heatmap generated from the POIs.
    """
    # Reproject to WGS84 so geometries carry lat/lon coordinates.
    pois = to_wgs84(pois)

    # Optionally restrict to POIs matching the keyword.
    selected = pois if kwd is None else filter_by_kwd(pois, kwd)

    # Optionally down-sample for display.
    if 0 < sample_size < len(selected.index):
        selected = selected.sample(sample_size)

    # Center the map on the centroid of the POIs' bounding box.
    bounds = bbox(selected)
    fmap = folium.Map(
        location=[bounds.centroid.y, bounds.centroid.x],
        tiles=tiles,
        width=width,
        height=height,
    )
    # Zoom so the whole bounding box is visible.
    fmap.fit_bounds(
        ([bounds.bounds[1], bounds.bounds[0]], [bounds.bounds[3], bounds.bounds[2]])
    )

    # HeatMap expects [lat, lon] pairs.
    coords = [[row['geometry'].y, row['geometry'].x]
              for _, row in selected.iterrows()]
    HeatMap(coords, radius=radius).add_to(fmap)
    return fmap
from typing import Tuple
def _color_int_to_rgb(integer: int) -> Tuple[int, int, int]:
"""Convert an 24 bit integer into a RGB color tuple with the value range (0-255).
Parameters
----------
integer : int
The value that should be converted
Returns
-------
Tuple[int, int, int]:
The resulting RGB tuple.
"""
return ((integer >> 16) & 255, (integer >> 8) & 255, integer & 255) | df3eb5ad92d9383b0e6fe5c1603e0caec0df5c45 | 3,632,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.