| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def extract_text(arg: Message_T) -> str:
"""
Extract the plain-text part of the message (plain-text segments are joined with spaces).
Args:
arg (nonebot.typing.Message_T): the message to extract plain text from
"""
arg_as_msg = Message(arg)
return arg_as_msg.extract_plain_text()
| 5,342,600
|
def test_consolidate_elemental_array_():
"""
Tests that _consolidate_elemental_array_() returns a consolidated array of compositions for both whole-number and decimal values
"""
input_array = [
{"symbol": "N", "occurances": 2},
{"symbol": "H", "occurances": 8},
{"symbol": "Pt", "occurances": 1},
{"symbol": "S", "occurances": 6},
{"symbol": "C", "occurances": 6},
{"symbol": "N", "occurances": 6},
{"symbol": "C", "occurances": 2}
]
output_array = [
{"symbol": "N", "occurances": 8},
{"symbol": "H", "occurances": 8},
{"symbol": "Pt", "occurances": 1},
{"symbol": "S", "occurances": 6},
{"symbol": "C", "occurances": 8}
]
input_array_dec = [
{"symbol": "Ca", "occurances": 1},
{"symbol": "S", "occurances": 1},
{"symbol": "O", "occurances": 4},
{"symbol": "H", "occurances": 1},
{"symbol": "O", "occurances": 0.5}
]
output_array_dec = [
{"symbol": "Ca", "occurances": 1},
{"symbol": "S", "occurances": 1},
{"symbol": "O", "occurances": 4.5},
{"symbol": "H", "occurances": 1}
]
assert _consolidate_elemental_array_(input_array) == output_array
assert _consolidate_elemental_array_(input_array_dec) == output_array_dec
| 5,342,601
|
def check_package_for_ext_mods(path, target_python):
"""Walk the directory path, calling :func:`check_ext_mod` on each file.
"""
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
check_ext_mod(os.path.join(dirpath, filename), target_python)  # dirpath from os.walk already includes path
| 5,342,602
|
def dtw(x, y, dist, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
"""
assert len(x)
assert len(y)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:] # view
for i in range(r):
for j in range(c):
D1[i, j] = dist(x[i], y[j])
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
i_k = min(i + k, r - 1)
j_k = min(j + k, c - 1)
min_list += [D0[i_k, j], D0[i, j_k]]
D1[i, j] += min(min_list)
if len(x) == 1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1], C, D1, path
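A minimal usage sketch (not part of the original snippet), assuming the module-level imports `from numpy import zeros, inf` and the companion `_traceback` helper this function relies on are available:

x_seq = [1, 2, 3, 4]
y_seq = [1, 1, 2, 3, 4]
dist_fn = lambda a, b: abs(a - b)  # element-wise cost measure
d, cost, acc_cost, path = dtw(x_seq, y_seq, dist_fn)
print(d)     # minimum accumulated distance (0.0 for these two sequences)
print(path)  # warp path as a pair of index arrays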
| 5,342,603
|
def format_line_count_break(padding: int) -> str:
"""Return the line count break."""
return format_text(
" " * max(0, padding - len("...")) + "...\n", STYLE["detector_line_start"]
)
| 5,342,604
|
def identify_generic_specialization_types(
cls: type, generic_class: type
) -> Tuple[type, ...]:
"""
Identify the types of the specialization of generic class the class cls derives from.
:param cls: class which derives from a specialization of generic class.
:param generic_class: a generic class.
:return: specialization types.
"""
return get_args(find_generic_specialization_parent_class(cls, generic_class))
| 5,342,605
|
def Metadata():
"""Get a singleton that fetches GCE metadata.
Returns:
_GCEMetadata, An object used to collect information from the GCE metadata
server.
"""
def _CreateMetadata(unused_none):
global _metadata
if not _metadata:
_metadata = _GCEMetadata()
_metadata_lock.lock(function=_CreateMetadata, argument=None)
_metadata_lock.unlock()
return _metadata
| 5,342,606
|
def pa11y_counts(results):
"""
Given a list of pa11y results, return three integers:
number of errors, number of warnings, and number of notices.
"""
num_error = 0
num_warning = 0
num_notice = 0
for result in results:
if result['type'] == 'error':
num_error += 1
elif result['type'] == 'warning':
num_warning += 1
elif result['type'] == 'notice':
num_notice += 1
return num_error, num_warning, num_notice
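A quick usage sketch with hand-made result dicts (in practice the input would come from pa11y's JSON output):

results = [
{'type': 'error', 'message': 'Img element missing an alt attribute'},
{'type': 'warning', 'message': 'Check contrast'},
{'type': 'notice', 'message': 'Consider a skip link'},
{'type': 'notice', 'message': 'Check heading order'},
]
print(pa11y_counts(results))  # -> (1, 1, 2)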
| 5,342,607
|
def parse_properties(df, columns_to_integer=None, columns_to_datetime=None, columns_to_numeric=None, columns_to_boolean=None, columns_to_string = None, dt_unit = 'ms', boolean_dict = {'true': True, 'false': False, '': None}):
"""
Parse string columns to other formats. This function is used in the hubspot routine; it is not yet generalized to other routines.
df: pd.DataFrame
columns_to_: list with names of the columns to parse
return: pd.DataFrame with parsed columns
"""
if columns_to_integer:
df[columns_to_integer] = df[columns_to_integer].apply(string_to_integer)
if columns_to_datetime:
df[columns_to_datetime] = df[columns_to_datetime].apply(pd.to_datetime, unit = dt_unit)
if columns_to_numeric:
df[columns_to_numeric] = df[columns_to_numeric].apply(pd.to_numeric, errors = 'coerce', downcast='float')
if columns_to_boolean:
df[columns_to_boolean] = df[columns_to_boolean].replace(boolean_dict).astype('boolean')
if columns_to_string:
df[columns_to_string] = df[columns_to_string].apply(int_to_string)
return df
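A minimal sketch of how this could be called, assuming `pandas as pd` and the helper converters (`string_to_integer`, `int_to_string`) from the original module are in scope; the column names here are made up for illustration:

df = pd.DataFrame({
'amount': ['1.5', '2', ''],
'created': [1609459200000, 1612137600000, 1614556800000],  # epoch milliseconds
'active': ['true', 'false', ''],
})
df = parse_properties(df,
columns_to_numeric=['amount'],
columns_to_datetime=['created'],
columns_to_boolean=['active'])
print(df.dtypes)  # amount -> float32, created -> datetime64[ns], active -> boolean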
| 5,342,608
|
def http_request(method, url, headers, data=None):
"""
Request util
:param method: GET or POST or PUT
:param url: url
:param headers: headers
:param data: optional data (needed for POST)
:return: response text
"""
response = requests.request(method, url, headers=headers, data=data)
if response.status_code not in [200, 201, 204]:
http_error_msg = u'%s HTTP request failed: %s for url: %s' % (response.status_code, response.text, url)
#print ("utils.http_request ", http_error_msg)
raise requests.exceptions.HTTPError(http_error_msg)
return response.text
| 5,342,609
|
def beNice(obj):
"""Be nice : exponential backoff when over quota"""
wait = 1
while wait :
try :
return_value = obj.execute()
wait = 0
except : #FIXME : we should test the type of the exception
print("EXCEPT : Wait for %d seconds" % wait)
time.sleep(wait)
wait *= 2
return(return_value)
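A small illustration (purely hypothetical, not from the original module) of the backoff behaviour, using a stand-in for a Google API request object exposing `.execute()`:

class _FakeRequest:
    """Hypothetical request stub: fails twice with a quota error, then succeeds."""
    def __init__(self):
        self.calls = 0
    def execute(self):
        self.calls += 1
        if self.calls < 3:
            raise RuntimeError("over quota")
        return {"ok": True}

print(beNice(_FakeRequest()))  # sleeps 1s, then 2s, then prints {'ok': True}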
| 5,342,610
|
def asisted_singer_label(label_path):
"""
Assisted parser of labels
:param label_path: CSV file containing labels exported by parse_dataset.py script
:return:
"""
df = pd.read_csv(label_path)
unique_artists = list(set(df['artist']))
unique_labels = []
for artist in unique_artists:
x = input('Does the artist {} have only 1 singer? [Y/n]'.format(artist))
if x.strip().lower() == 'n':
print('Error: the parser does not support this feature. Skipping this artist...')
continue
y = 1
x = y - 1
while x != y:
x = input('What is the name of the singer of {}?'.format(artist))
y = input('Re-enter the label for {} (previous: {})'.format(artist, x))
unique_labels.append(x)
df['label'] = df.apply(axis=1, func=lambda row: unique_labels[unique_artists.index(row['artist'])])
df.to_csv(label_path, index=False)
print('info: updated label file at {}'.format(label_path))
return
| 5,342,611
|
def parse_title(line):
"""if this is title, return Tuple[level, content],
@type line: str
@return: Optional[Tuple[level, content]]
"""
line = line.strip()
if not line.startswith('#'):
return None
sharp_count = 0
for c in line:
if c == '#':
sharp_count += 1
else:
break
if sharp_count == len(line):
return None
title = line[sharp_count:].strip()
return sharp_count, title
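A few illustrative calls, assuming the function as defined above:

print(parse_title('## Getting started'))  # -> (2, 'Getting started')
print(parse_title('plain text'))          # -> None (not a title)
print(parse_title('####'))                # -> None (only '#' characters)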
| 5,342,612
|
def issubtype(cls: type, clsinfo: type) -> bool:
"""
Return whether ``cls`` is a subclass of ``clsinfo`` while also considering
generics.
:param cls: the subject.
:param clsinfo: the object.
:return: True if ``cls`` is a subclass of ``clsinfo`` considering generics.
"""
info_generic_type, info_args = _split_generic(clsinfo)
if clsinfo in (typing.Any, object):
result = True
elif info_args:
result = _issubtype_generic(cls, info_generic_type, info_args)
else:
result = issubclass(_without_generic(cls), _without_generic(clsinfo))
return result
| 5,342,613
|
def delete_status(id):
"""Delete an existing status
The status to be deleted should be posted as JSON using
'application/json' as the content type. The posted JSON needs to
have 2 required fields:
* user (the username)
* api_key
An example of the JSON::
{
"user": "r1cky",
"api_key": "qwertyuiopasdfghjklzxcvbnm1234567890"
}
"""
db = get_session(current_app)
# The data we need
user = request.json.get('user')
if not (id and user):
return jsonify(dict(error='Missing required fields.')), 400
status = db.query(Status).filter_by(id=id)
if not status.count():
return jsonify(dict(error='Status does not exist.')), 400
if not status[0].user.username == user:
return jsonify(dict(error='You cannot delete this status.')), 403
status.delete()
db.commit()
return jsonify(dict(id=id))
| 5,342,614
|
def process_data(data):
""" Change labels, group by planner and format for latex."""
data = data.replace(
{
"grid_run_1": "Grid",
"prm_run_1": "PRM A",
"prm_run_2": "PRM B",
"prm_run_3": "PRM C",
}
)
data = data.rename(
columns={"num_samples": "samples", "cc_checks": "collision checks"}
)
df = data.groupby(["run"]).sum()[["samples", "jvm", "time", "collision checks"]]
df["samples"] = np.round(df["samples"])
df["time"] = np.round(df["time"])
df["samples"] = np.round(df["collision checks"])
sr = data.groupby(["run"]).sum()[["success"]]
df["solved"] = sr.astype(int).astype(str) + "/14"
latex = df.to_latex(
formatters={
"samples": "{:,.0f}".format,
"jvm": "{:.2f}".format,
"collision checks": "{:,.0f}".format,
"time": "{:.0f}".format,
}
)
return df, latex
| 5,342,615
|
def detect_outlier(TS, samples_wind=60, order=3):
"""Find outliers in TS by interpolate one sample at a time, measure diff.
between rec. sample and interpolated, and getting the peaks in the int diff
across recording.
Parameters
-------------
TS : array (x, y) x n_samples
Times series to extract features
samples_wind : int
Window length of segment where a sample is interpolated.
order : int
B-spline interpolation order
Returns
--------
outliers: list of array n_chans [n_outliers]
Indices of outliers per chans
outliers_int: list of array n_chans [n_outliers]
New interpolated values of the outliers
"""
s_win_half = int(samples_wind/2)
outliers = []
outliers_int = []
zdiffs = []
for ts in TS:
n_samples, = ts.shape
diff = [np.nan]
ts_int_one = [np.nan]
for w in range(1,n_samples-1):
wix = [w-s_win_half,w+s_win_half]
# Bound beg or end if outside
wix[0] = 0 if wix[0]<0 else wix[0]
wix[1] = n_samples if wix[1]>n_samples else wix[1]
seg1, seg2 = ts[wix[0]:w], ts[w+1:wix[1]]
seg = np.concatenate((seg1,seg2))
# make indexes ts with and without sample
ixs = np.arange(seg.shape[0]+1)
ixs_out = np.delete(ixs, np.argwhere(ixs == seg1.shape[0]))
# Interpolate and measure diff
fcubic = interpolate.interp1d(ixs_out, seg, kind=order)
ts_int_out = fcubic(ixs)
smpl_int = ts_int_out[seg1.shape[0]]
diff.append(np.abs(smpl_int-ts[w]))
ts_int_one.append(smpl_int)
diff_z = zscore(diff)
pks_p, _ = feat_ext.find_maxmin_peaks(diff_z[1:], height=5)
pks_p = pks_p + 1  # shift by 1 sample (the first entry is NaN)
int_smp = np.array(ts_int_one)[pks_p]
outliers.append(pks_p)
outliers_int.append(int_smp)
zdiffs.append(diff_z)
return outliers, outliers_int, np.array(zdiffs)
| 5,342,616
|
def postprocess(backpointers, best_tag_id):
"""Do postprocess."""
best_tag_id = best_tag_id.asnumpy()
batch_size = len(best_tag_id)
best_path = []
for i in range(batch_size):
best_path.append([])
best_local_id = best_tag_id[i]
best_path[-1].append(best_local_id)
for bptrs_t in reversed(backpointers):
bptrs_t = bptrs_t[0].asnumpy()
local_idx = bptrs_t[i]
best_local_id = local_idx[best_local_id]
best_path[-1].append(best_local_id)
# Pop off the start tag (we don't want to return that to the caller)
best_path[-1].pop()
best_path[-1].reverse()
return best_path
| 5,342,617
|
def carnatic_string_to_ql_array(string_):
"""
:param str string_: A string of carnatic durations separated by spaces.
:return: The input string converted to a quarter length array.
:rtype: numpy.array.
>>> carnatic_string_to_ql_array('oc o | | Sc S o o o')
array([0.375, 0.25 , 0.5 , 0.5 , 1.5 , 1. , 0.25 , 0.25 , 0.25 ])
"""
split_string = string_.split()
vals = []
for token in split_string:
try:
if carnatic_symbols[token] is not None:
vals.append(carnatic_symbols[token]["value"])
except KeyError:
pass
return np.array(vals)
| 5,342,618
|
async def test_button_error(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Test error handling of the WLED buttons."""
mock_wled.reset.side_effect = WLEDError
with pytest.raises(HomeAssistantError, match="Invalid response from WLED API"):
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{ATTR_ENTITY_ID: "button.wled_rgb_light_restart"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("button.wled_rgb_light_restart")
assert state
assert state.state == "2021-11-04T16:37:00+00:00"
| 5,342,619
|
def draw_fixations(fixations, dispsize, imagefile=None, durationsize=True, durationcolour=True, alpha=0.5, savefilename=None):
"""Draws circles on the fixation locations, optionally on top of an image,
with optional weighting of the duration for circle size and colour
arguments
fixations - a list of fixation ending events from a single trial,
as produced by edfreader.read_edf, e.g.
edfdata[trialnr]['events']['Efix']
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the fixations
are to be plotted, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
durationsize - Boolean indicating whether the fixation duration is
to be taken into account as a weight for the circle
size; longer duration = bigger (default = True)
durationcolour - Boolean indicating whether the fixation duration is
to be taken into account as a weight for the circle
colour; longer duration = hotter (default = True)
alpha - float between 0 and 1, indicating the transparency of
the plotted fixations, where 0 is completely transparent and 1
is completely opaque (default = 0.5)
savefilename - full path to the file in which the figure should be
saved, or None to not save the file (default = None)
returns
fig - a matplotlib.pyplot Figure instance, containing the
fixations
"""
# FIXATIONS
fix = parse_fixations(fixations)
# IMAGE
fig, ax = draw_display(dispsize, imagefile=imagefile)
# CIRCLES
# duration weights
if durationsize:
siz = 200 * (fix['dur']/30.0)
else:
siz = 1 * numpy.median(fix['dur']/30.0)
if durationcolour:
col = fix['dur']
else:
col = COLS['chameleon'][2]
# draw circles
#ax.scatter(fix['x'],fix['y'], s=siz, c=col, marker='o', cmap='jet', alpha=alpha, edgecolors='face')
graf=ax.scatter(fix['x'],fix['y'], s=siz, c=col, cmap='jet', alpha=alpha, edgecolors='face')
#graf.set_facecolor('none')
# FINISH PLOT
# invert the y axis, as (0,0) is top left on a display
ax.invert_yaxis()
# save the figure if a file name was provided
if savefilename is not None:
fig.savefig(savefilename)
return fig
| 5,342,620
|
def makedirs(path):
"""Replacement for Python's built-in "mkdirs" from the os module
This version of makedirs makes sure that all directories created are
world-writable. This is necessary because the MAR CC Dserver writes
from a different computer with a different user id (marccd=500) than
then user caccount on the beamline control computer (useridb=615).
"""
from os import makedirs
if not exists(path): makedirs(path)
try: chmod (path,0777)
except OSError: pass
| 5,342,621
|
def get_examples(fpath, doc_dir, max_seq_len=-1, max_sent_num=200, sent_level=True):
"""
Get data from tsv files.
Input:
fpath -- the file path.
Assume number of classes = 2
Output:
ts -- a list of strings (each contain the text)
ys -- float32 np array (num_example, )
zs -- float32 np array (num_example, )
ss -- float32 np array (num_example, num_sent, sequence_length)
szs -- float32 np array (num_example, num_sent)
"""
n = 0  # number of examples processed
ts = []
ys = []
zs = []
ss = []
s_labels = []
min_len = 10000
max_len = 0
avg_z_len = 0.
avg_num_sent = 0.
real_max_sent_num = 0
avg_r_num = 0.
with open(fpath, "r") as f:
for line in tqdm(f):
json_data = json.loads(line.strip())
doc_filename = json_data['annotation_id']
file_doc = open(os.path.join(doc_dir, doc_filename))
sentences = file_doc.readlines()
file_doc.close()  # avoid leaking file handles across the loop
s_masks = []
sentences = [s.strip().split() for s in sentences]
t = [inner for outer in sentences for inner in outer]
cur_id = 0
for sentence in sentences:
if len(s_masks) < max_sent_num:
s_masks.append([0.0] * len(t))
for token in sentence:
s_masks[-1][cur_id] = 1.0
cur_id += 1
avg_num_sent += len(s_masks)
if len(s_masks) > real_max_sent_num:
real_max_sent_num = len(s_masks)
if max_seq_len > 0:
t = t[:max_seq_len]
# print(t)
if len(t) > max_len:
max_len = len(t)
if len(t) < min_len:
min_len = len(t)
y = json_data['classification']
if y == 'POS':
y = 1
elif y == 'NEG':
y = 0
else:
print('ERROR: label {}'.format(y))
evidences = json_data['evidences']
z = [0] * len(t)
z_len = 0
for evidence_list in evidences:
for evidence in evidence_list:
z_start = evidence['start_token']
z_end = evidence['end_token']
z_end = min(z_end, len(t))
z_text = evidence['text']
for idx in range(z_start, z_end):
z[idx] = 1
z_len += 1
if max_seq_len < 0:
assert z_text == ' '.join(t[z_start:z_end]), z_text + '<->' + ' '.join(t[z_start:z_end])
else:
if z_end < max_seq_len:
assert z_text == ' '.join(t[z_start:z_end]), z_text + '<->' + ' '.join(t[z_start:z_end])
# print(z_text)
# print(t[z_start:z_end])
avg_z_len += z_len
if sent_level:
s_label = [0.] * len(s_masks)
new_z = [0] * len(t)
for sid, s_mask in enumerate(s_masks):
is_rationale = False
for idx, val in enumerate(s_mask):
if idx >= max_seq_len:
continue
if val == 1.0:
if z[idx] == 1:
is_rationale = True
break
if is_rationale:
avg_r_num += 1
s_label[sid] = 1.
for idx, val in enumerate(s_mask):
if idx >= max_seq_len:
continue
if val == 1.0:
new_z[idx] = 1
# z = new_z
# break
# s_spans = json_data['sentences']
# # if len(s_spans) > max_sent_num:
# # max_sent_num = len(s_spans)
# # # print(line)
# s_masks = []
# for sid, s_span in enumerate(s_spans):
# (b, e) = s_span
# if b >= max_seq_len:
# break
# # print(len(s_masks))
# # print(max_sent_num)
# if len(s_masks) < max_sent_num:
# s_masks.append([0.0] * len(t))
# for i in range(b, e):
# # print(len(s_masks[-1]), i)
# if i >= max_seq_len:
# break
# s_masks[-1][i] = 1.0
# if len(s_masks) > real_max_sent_num:
# real_max_sent_num = len(s_masks)
ts.append(t)
ys.append(y)
zs.append(z)
ss.append(s_masks)
if sent_level:
s_labels.append(s_label)
# print('len s_mask:', len(s_masks))
# print('len s_label:', len(s_label))
assert len(s_masks) == len(s_label)
n += 1
# print(avg_z_len)
print("Number of examples: %d" % n)
print("Maximum doc length: %d" % max_len)
print("Minimum doc length: %d" % min_len)
print("Average length of rationales: %.4f" % (avg_z_len / n) )
print("Average sent number: %d" % (avg_num_sent/n))
print("Maximum sent number: %d" % real_max_sent_num)
print("Average rationle-sent number: %d" % (avg_r_num / n))
if sent_level:
return ts, ys, zs, ss, s_labels
return ts, ys, zs, ss
| 5,342,622
|
def login_redirect(request: HttpRequest) -> HttpResponse:
"""
Redirects the user to the Strava authorization page
:param request: HttpRequest
:return: HttpResponse
"""
strava_uri = get_strava_uri()
return redirect(strava_uri)
| 5,342,623
|
def f(p, x):
"""
Parameters
----------
p : list
A list that has a length of at least 2.
x : int or float
Scaling factor for the first variable in p.
Returns
-------
int or float
Returns the first value in p scaled by x, added to the second value in p.
Examples
--------
>>> import numpy as np
>>> from .pycgmKinetics import f
>>> p = [1, 2]
>>> x = 10
>>> f(p, x)
12
>>> p = np.array([5.16312215, 8.79307163])
>>> x = 2.0
>>> np.around(f(p, x),8)
19.11931593
"""
return (p[0] * x) + p[1]
| 5,342,624
|
def get_contributors_users(users_info) -> list:
"""
Get the github users from the inner PRs.
Args:
users_info (list): the response of get_inner_pr_request()
Returns (list): Github users
"""
users = []
for item in users_info:
user = item.get('login')
github_profile = item.get('html_url')
pr_body = item.get('body')
if user != 'xsoar-bot':
users.append({
'Contributor': f"<img src='{item.get('avatar_url')}'/><br></br> "
f"<a href='{github_profile}' target='_blank'>{user}</a>"
})
if user == 'xsoar-bot':
if 'Contributor' in pr_body:
contributor = USER_NAME_REGEX.search(pr_body)[0].replace('\n', '')
user_info = get_github_user(contributor)
github_avatar = user_info.get('avatar_url')
github_profile = user_info.get('html_url')
if not github_avatar and not github_profile:
print(f'The user "{contributor}" was not found.')
continue
users.append({
'Contributor': f"<img src='{github_avatar}'/><br></br> "
f"<a href='{github_profile}' target='_blank'>{contributor}</a>"
})
for user in users:
prs = users.count(user)
user.update({'Number of Contribution(s)': prs})
list_users = []
result = {i['Contributor']: i for i in reversed(users)}.values()
new_res = sorted(result, key=lambda k: k['Number of Contribution(s)'], reverse=True)
for user in new_res:
user['Contributor'] += f'<br></br>{user["Number of Contribution(s)"]} Contributions'
list_users.append(user['Contributor'])
return list_users
| 5,342,625
|
def euler2mat(roll, pitch, yaw):
"""
Create a rotation matrix for the orientation expressed by this transform.
Copied directly from FRotationTranslationMatrix::FRotationTranslationMatrix
in Engine/Source/Runtime/Core/Public/Math/RotationTranslationMatrix.h ln 32
:return: 3x3 rotation matrix as a numpy array
"""
angles = _TORAD * np.array((roll, pitch, yaw))
sr, sp, sy = np.sin(angles)
cr, cp, cy = np.cos(angles)
return np.array([
[cp * cy, sr * sp * cy - cr * sy, -(cr * sp * cy + sr * sy)],
[cp * sy, sr * sp * sy + cr * cy, cy * sr - cr * sp * sy],
[sp, -sr * cp, cr * cp]
])
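A quick sanity check (my own sketch, assuming `numpy as np` is imported and a module-level `_TORAD = np.pi / 180` as the code implies):

R = euler2mat(roll=0, pitch=0, yaw=90)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # a 90-degree yaw maps x onto y: [0. 1. 0.]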
| 5,342,626
|
def remove_articles(string: str, p: float = 1.0) -> str:
"""Remove articles from text data.
Matches and removes the following articles:
* the
* a
* an
* these
* those
* his
* hers
* their
with probability p.
Args:
string: text
p: probability of removing a given article
Returns:
enriched text
"""
mapping = {article: "" for article in ARTICLES}
return _sub_words(string, probability=p, mapping=mapping)
| 5,342,627
|
def _remove_overlaps(in_file, out_dir, data):
"""Remove regions that overlap with next region, these result in issues with PureCN.
"""
out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file)))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
prev_line = None
for line in in_handle:
if prev_line:
pchrom, pstart, pend = prev_line.split("\t", 4)[:3]
cchrom, cstart, cend = line.split("\t", 4)[:3]
# Skip if chromosomes match and end overlaps start
if pchrom == cchrom and int(pend) > int(cstart):
pass
else:
out_handle.write(prev_line)
prev_line = line
out_handle.write(prev_line)
return out_file
| 5,342,628
|
def _get_definitions(source):
# type: (str) -> Tuple[Dict[str, str], int]
"""Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument.
"""
max_len = 0
descs = collections.OrderedDict() # type: Dict[str, str]
lines = (s.strip() for s in source.splitlines())
non_empty_lines = (s for s in lines if s)
for line in non_empty_lines:
if line:
arg, desc = re.split(r"\s\s+", line.strip())
arg_len = len(arg)
if arg_len > max_len:
max_len = arg_len
descs[arg] = desc
return descs, max_len
| 5,342,629
|
def test_repr(functional_group):
"""
Test :meth:`.FunctionalGroup.__repr__`.
Parameters
----------
functional_group : :class:`.FunctionalGroup`
The functional group whose representation is tested.
Returns
-------
None : :class:`NoneType`
"""
other = eval(repr(functional_group), dict(stk.__dict__))
is_equivalent_functional_group(functional_group, other)
| 5,342,630
|
def test_smooth_n_pt_9_units():
"""Test the smooth_n_pt function using 9 points with units."""
hght = np.array([[5640., 5640., 5640., 5640., 5640.],
[5684., 5676., 5666., 5659., 5651.],
[5728., 5712., 5692., 5678., 5662.],
[5772., 5748., 5718., 5697., 5673.],
[5816., 5784., 5744., 5716., 5684.]]) * units.meter
shght = smooth_n_point(hght, 9, 1)
s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
[5684., 5675.5, 5666.75, 5658.75, 5651.],
[5728., 5711., 5693.5, 5677.5, 5662.],
[5772., 5746.5, 5720.25, 5696.25, 5673.],
[5816., 5784., 5744., 5716., 5684.]]) * units.meter
assert_array_almost_equal(shght, s_true)
| 5,342,631
|
def pairs_of_response(request):
"""pairwise testing for content-type, headers in responses for all urls """
response = requests.get(request.param[0], headers=request.param[1])
print(request.param[0])
print(request.param[1])
return response
| 5,342,632
|
def load_gtdb_tax(infile, graph):
"""
loading gtdb taxonomy & adding to DAG
"""
# url or file download/open
try:
inF = gtdb2td.Utils.get_url_data(infile)
except (OSError,ValueError) as e:
try:
ftpstream = urllib.request.urlopen(infile)
inF = csv.reader(codecs.iterdecode(ftpstream, 'utf-8'))
except ValueError:
inF = gtdb2td.Utils.Open(infile)
# parsing lines
for i,line in enumerate(inF):
try:
line = line.rstrip()
except AttributeError:
line = line[0].rstrip()
if line == '':
continue
line = gtdb2td.Utils.Decode(line)
line = line.split('\t')
if len(line) < 2:
msg = 'Line{} does not contain >=2 columns'
raise ValueError(msg.format(i+1))
tax = line[1].split(';')
if len(tax) < 7:
msg = 'WARNING: Line{}: taxonomy length is <7'
logging.info(msg.format(i+1))
tax.append(line[0])
# adding taxonomy to graph
for ii,cls in enumerate(tax):
graph.add_vertex(cls)
if ii == 0:
graph.add_edge('root', cls)
else:
graph.add_edge(tax[ii-1], cls)
try:
inF.close()
except AttributeError:
pass
| 5,342,633
|
def setup():
""" The setup wizard screen """
if DRIVER is True:
flash(Markup('Driver not loaded'), 'danger')
return render_template("setup.html")
| 5,342,634
|
def ldns_pkt_set_edns_extended_rcode(*args):
"""LDNS buffer."""
return _ldns.ldns_pkt_set_edns_extended_rcode(*args)
| 5,342,635
|
def sim_spiketrain_poisson(rate, n_samples, fs, bias=0):
"""Simulate spike train from a Poisson distribution.
Parameters
----------
rate : float
The firing rate of neuron to simulate.
n_samples : int
The number of samples to simulate.
fs : int
The sampling rate.
bias : float, optional
Offset added to `rate` when computing the per-sample spike probability.
Returns
-------
spikes : 1d array
Simulated spike train.
Examples
--------
Simulate a spike train from a Poisson distribution.
>>> spikes = sim_spiketrain_poisson(0.4, 10, 1000, bias=0)
"""
spikes = np.zeros(n_samples)
# Create uniform sampling distribution
unif = np.random.uniform(0, 1, size=n_samples)
# Create spikes
mask = unif <= ((rate + bias) * 1/fs)
spikes[mask] = 1
return spikes
| 5,342,636
|
def getHausdorff(labels, predictions):
"""Compute the Hausdorff distance."""
# Hausdorff distance is only defined when something is detected
resultStatistics = sitk.StatisticsImageFilter()
resultStatistics.Execute(predictions)
if resultStatistics.GetSum() == 0:
return float('nan')
# Edge detection is done by ORIGINAL - ERODED, keeping the outer boundaries of lesions. Erosion is performed in 2D
eTestImage = sitk.BinaryErode(labels, (1, 1, 0))
eResultImage = sitk.BinaryErode(predictions, (1, 1, 0))
hTestImage = sitk.Subtract(labels, eTestImage)
hResultImage = sitk.Subtract(predictions, eResultImage)
hTestArray = sitk.GetArrayFromImage(hTestImage)
hResultArray = sitk.GetArrayFromImage(hResultImage)
# Convert voxel location to world coordinates. Use the coordinate system of the test image
# np.nonzero = elements of the boundary in numpy order (zyx)
# np.flipud = elements in xyz order
# np.transpose = create tuples (x,y,z)
# labels.TransformIndexToPhysicalPoint converts (xyz) to world coordinates (in mm)
testCoordinates = np.apply_along_axis(labels.TransformIndexToPhysicalPoint, 1,
np.transpose(np.flipud(np.nonzero(hTestArray))).astype(int))
resultCoordinates = np.apply_along_axis(labels.TransformIndexToPhysicalPoint, 1,
np.transpose(np.flipud(np.nonzero(hResultArray))).astype(int))
# Compute distances from test to result; and result to test
dTestToResult = getDistancesFromAtoB(testCoordinates, resultCoordinates)
dResultToTest = getDistancesFromAtoB(resultCoordinates, testCoordinates)
return max(np.percentile(dTestToResult, 95), np.percentile(dResultToTest, 95))
| 5,342,637
|
def _filesizeformat(file_str):
"""
Remove the unicode characters from the output of the filesizeformat()
function.
:param file_str:
:returns: A string representation of a filesizeformat() string
"""
cmpts = re.match(r'(\d+\.?\d*)\S(\w+)', filesizeformat(file_str))
return '{} {}'.format(cmpts.group(1), cmpts.group(2))
| 5,342,638
|
def radec_to_lb(ra, dec, frac=False):
"""
Convert from ra, dec to galactic coordinates.
Formulas from 'An Introduction to Modern Astrophysics (2nd Edition)' by
Bradley W. Carroll, Dale A. Ostlie (Eq. 24.16 onwards).
NOTE: This function is not as accurate as the astropy conversion, nor as
the Javascript calculators found online. However, as using astropy was
prohibitively slow while running over large populations, we use this
function. While this function is not as accurate, the under/over
estimations of the coordinates are equally distributed meaning the errors
cancel each other in the limit of large populations.
Args:
ra (string): Right ascension given in the form '19:06:53'
dec (string): Declination given in the form '-40:37:14'
frac (bool): Denote whether coordinates are already fractional or not
Returns:
gl, gb (float): Galactic longitude and latitude [fractional degrees]
"""
if not frac:
ra, dec = frac_deg(ra, dec)
a = math.radians(ra)
d = math.radians(dec)
# Coordinates of the galactic north pole (J2000)
a_ngp = math.radians(12.9406333 * 15.)
d_ngp = math.radians(27.1282500)
l_ngp = math.radians(123.9320000)
sd_ngp = math.sin(d_ngp)
cd_ngp = math.cos(d_ngp)
sd = math.sin(d)
cd = math.cos(d)
# Calculate galactic longitude
y = cd*math.sin(a - a_ngp)
x = cd_ngp*sd - sd_ngp*cd*math.cos(a - a_ngp)
gl = - math.atan2(y, x) + l_ngp
gl = math.degrees(gl) % 360
# Shift so in range -180 to 180
if gl > 180:
gl = -(360 - gl)
# Calculate galactic latitude
gb = math.asin(sd_ngp*sd + cd_ngp*cd*math.cos(a - a_ngp))
gb = math.degrees(gb) % 360.
if gb > 270:
gb = -(360 - gb)
return gl, gb
| 5,342,639
|
def status_codes_by_date_stats():
"""
Get stats for status codes by date.
Returns:
list: status codes + date grouped by type: 2xx, 3xx, 4xx, 5xx, attacks.
"""
def date_counter(queryset):
return dict(Counter(map(
lambda dt: ms_since_epoch(datetime.combine(
make_naive(dt), datetime.min.time())),
list(queryset.values_list('datetime', flat=True)))))
codes = {low: date_counter(
RequestLog.objects.filter(status_code__gte=low, status_code__lt=high))
for low, high in ((200, 300), (300, 400), (400, 500))}
codes[500] = date_counter(RequestLog.objects.filter(status_code__gte=500))
codes['attacks'] = date_counter(RequestLog.objects.filter(
status_code__in=(400, 444, 502)))
stats = {}
for code in (200, 300, 400, 500, 'attacks'):
for date, count in codes[code].items():
if stats.get(date, None) is None:
stats[date] = {200: 0, 300: 0, 400: 0, 500: 0, 'attacks': 0}
stats[date][code] += count
stats = sorted([(k, v) for k, v in stats.items()], key=lambda x: x[0])
return stats
| 5,342,640
|
def do_server_monitor(client, args):
""" Show monitor info of a virtual server """
guest = client.guests.get_specific(args.id, 'monitor', command=args.cmd)
print(guest['results'])
| 5,342,641
|
def r8_y1x ( t ):
"""
#*****************************************************************************80
#
#% R8_Y1X evaluates the exact solution of the ODE.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 30 August 2010
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, real T, the value of the independent variable.
#
# Output, real Y1X, the exact solution.
#
"""
y1x = 20.0 / ( 1.0 + 19.0 * exp ( - 0.25 * t ) )
return(y1x)
| 5,342,642
|
def test_lockedmove_vs():
"""Make sure lockedmove is handled properly."""
dragonite = Pokemon(name="dragonite", moves=["outrage"])
aggron_target = Pokemon(name="aggron", moves=["recover"])
outrage = VolatileStatusMove(**MOVE_DATA["outrage"])
outrage.apply_volatile_status(dragonite, aggron_target)
assert dragonite.volatile_status
assert "lockedmove" in dragonite.volatile_status
assert dragonite.volatile_status["lockedmove"]["move"] == outrage
assert dragonite.volatile_status["lockedmove"]["counter"] == 0
| 5,342,643
|
def urlhause(query):
"""
URLhaus is a very nice threat feed site, which can be used to see which website delivers which malware.
That being said, they only provide an API to download CSV dumps of their registered malicious domains, and currently the API
cannot be used to query for information directly. While downloading dumps is easy and quick, it is not ideal when you want real
time data. So I created a web scraper using the requests and BeautifulSoup modules. It queries the site by changing the URL
and scraping the HTML response to obtain the result.
Example:
>>> from urlhause import urlhause
>>> urlhause("senorita")
Date: 2018-07-07 06:14:04 Link: http://www.senoritasmargaritas.com/wp-includes/...
Status: Offline, Tags: doc Trickbot , Reporter: @p5yb34m
>>> urlhause("<any IP address>")
NOTE: The script extracts any useful information it can from the HTML response; if you feel it does not,
change it as you wish.
"""
response = requests.get("https://urlhaus.abuse.ch/browse.php?search={}".format(query))
lost = response.text
soup = BeautifulSoup(lost,"html.parser")
tables = soup.find("table")
all_rows = tables.find_all("tr")
if len(all_rows) < 2:
print("Not listed in URLHause")
else:
for i in range(1,len(all_rows)):
get_tds = all_rows[i].find_all("td")
print("|Date: {}| |Link: {}| |Status: {}| |Tags: {}| |Reporter: {}|\n".format(get_tds[0].text,get_tds[1].text,get_tds[2].text,get_tds[3].text,get_tds[4].text))
| 5,342,644
|
def verdi_process():
"""Inspect and manage processes."""
| 5,342,645
|
def clear_channels(header, channels=None):
"""Utility function for management of channel related metadata."""
bitmask = BitmaskWrapper(header['channel_mask'])
if channels is not None:
_verify_channels(channels)
bitmask.clear([channel-1 for channel in channels])
else:
bitmask.clear()
header['active_channels'] = bitmask.count()
| 5,342,646
|
def get_target_model_ops(model, model_tr):
"""Get operations related to the target model.
Args:
* model: original model
* model_tr: target model
Returns:
* init_op: initialization operation for the target model
* updt_op: update operation for the target model
"""
init_ops, updt_ops = [], []
for var, var_tr in zip(model.vars, model_tr.vars):
init_ops.append(tf.assign(var_tr, var))
if var not in model.trainable_vars:
updt_ops.append(tf.assign(var_tr, var)) # direct update for non-trainable variables
else:
updt_ops.append(tf.assign(var_tr, (1. - FLAGS.ddpg_tau) * var_tr + FLAGS.ddpg_tau * var))
return tf.group(*init_ops), tf.group(*updt_ops)
| 5,342,647
|
def gotit():
"""
React to a silver token being found: grab it, turn and drive to set it aside, release it, then reverse and turn back.
"""
R.grab()
print("Gotcha!")
turn(25, 2.35)
drive(20,1)
R.release()
drive(-20,2)
turn(-25,2.35)
| 5,342,648
|
def next_count(start: int = 0, step: int = 1):
"""Return a callable returning descending ints.
>>> nxt = next_count(1)
>>> nxt()
1
>>> nxt()
2
"""
count = itertools.count(start, step)
return functools.partial(next, count)
| 5,342,649
|
def show_locale(key_id: int):
"""Get a locale by ID"""
return locales[key_id]
| 5,342,650
|
def refresh():
"""Pull fresh data from Open AQ and replace existing data."""
DB.drop_all()
DB.create_all()
update_db()
DB.session.commit()
records = Record.query.all()
return render_template('aq_base.html', title='Refreshed!', records=records)
| 5,342,651
|
def get_similar_taxa():
"""
Get a list of all pairwise permutations of taxa sorted according to similarity
Useful for detecting duplicate and near-duplicate taxonomic entries
:return: list of 2-tuples ordered most similar to least
"""
taxa = Taxon.objects.all()
taxon_name_set = set([t.name for t in taxa])
plist = [pair for pair in permutations(taxon_name_set, 2)]
return sorted(plist, key=similar, reverse=True)
| 5,342,652
|
def make_img_id(label, name):
""" Creates the image ID for an image.
Args:
label: The image label.
name: The name of the image within the label.
Returns:
The image ID. """
return json.dumps([label, name])
| 5,342,653
|
def col2rgb(color):
""" Convert any colour known by matplotlib to RGB [0-255] """
return rgb012rgb(*col2rgb01(color))
| 5,342,654
|
def quick_select_median(values: List[tuple], pivot_fn=random.choice, index=0) -> tuple:
"""
Find the median element of `values` using the quickselect algorithm
:param values: List[tuple]
:param pivot_fn:
:param index: int
:return: tuple
"""
k = len(values) // 2
return quick_select(values, k, pivot_fn, index=index)
| 5,342,655
|
def get_admin_token(key, previous=False):
"""Returns a token with administrative priviledges
Administrative tokens provide a signature that can be used to authorize
edits and to trigger specific administrative events.
Args:
key (str): The key for generating admin tokens
previous (bool, optional): Retrieve the most recently issued token for this key
Returns:
Token
"""
if key is None:
raise ValueError('Value for "key" was expected')
expires = get_admin_lifetime()
secret = __get_admin_salt()
argset = [secret, key]
ts = int(time.time())
if previous:
ts = ts - expires
argset.extend(str(int(ts / expires)))
str_argset = [str(a) for a in argset]
msg = ':'.join(str_argset)
tok = Token(hashlib.sha256(msg.encode('utf-8')).hexdigest()[
0:settings.TOKEN_LENGTH])
return tok
| 5,342,656
|
def modSymbolsFromLabelInfo(labelDescriptor):
"""Returns a set of all modiciation symbols which were used in the
labelDescriptor
:param labelDescriptor: :class:`LabelDescriptor` describes the label setup
of an experiment
:returns: #TODO: docstring
"""
modSymbols = set()
for labelStateEntry in viewvalues(labelDescriptor.labels):
for labelPositionEntry in viewvalues(labelStateEntry['aminoAcidLabels']):
for modSymbol in aux.toList(labelPositionEntry):
if modSymbol != '':
modSymbols.add(modSymbol)
return modSymbols
| 5,342,657
|
def vulnerability_weibull(x, alpha, beta):
"""Return vulnerability in Weibull CDF
Args:
x: 3sec gust wind speed at 10m height
alpha: parameter value used in defining vulnerability curve
beta: ditto
Returns: weibull_min.cdf(x, shape, loc=0, scale)
Note:
weibull_min.pdf = c/s * (x/s)**(c-1) * exp(-(x/s)**c)
c: shape, s: scale, loc=0
weibull_min.cdf = 1 - exp(-(x/s)**c)
while Australian wind vulnerability is defined as
DI = 1 - exp(-(x/exp(beta))**(1/alpha))
therefore:
s = exp(beta)
c = 1/alpha
"""
# convert alpha and beta to shape and scale respectively
shape = 1 / alpha
scale = np.exp(beta)
return weibull_min.cdf(x, shape, loc=0, scale=scale)
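A short verification sketch (not from the original module) that the scipy parameterisation matches the stated vulnerability form, assuming `numpy as np` and `weibull_min` are imported as the snippet implies:

x, alpha, beta = 50.0, 0.2, 4.0
di = vulnerability_weibull(x, alpha, beta)
di_direct = 1.0 - np.exp(-(x / np.exp(beta)) ** (1.0 / alpha))
assert np.isclose(di, di_direct)  # both come out to about 0.47 for these inputs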
| 5,342,658
|
def _find_best_deals(analysis_json) -> tuple:
"""Finds the best deal out of the analysis"""
best_deals = []
for deal in analysis_json:
if _get_deal_value(analysis_json, deal) > MINIMUM_ConC_PERCENT:
best_deals.append(deal)
best_deals.sort(key=lambda x: _get_deal_value(analysis_json, x),
reverse=True
)
best_deal = best_deals[0]
return best_deal, best_deals
| 5,342,659
|
def deserialize(s_transform):
"""
Convert a serialized transform (a dict with optional 'location' and 'rotation' keys) back into an UnrealTransform.
:param s_transform: the serialized transform, or None for a default UnrealTransform
:return: an UnrealTransform
"""
if s_transform is None:
return UnrealTransform()
return UnrealTransform(
location=s_transform['location'] if 'location' in s_transform else (0, 0, 0),
rotation=s_transform['rotation'] if 'rotation' in s_transform else (0, 0, 0)
)
| 5,342,660
|
def game(key):
"""Handles key events in the game"""
direction = DIRECTIONS.get(key)
if direction:
move(maze, direction, "player")
| 5,342,661
|
def spectrum(x, times=None, null_hypothesis=None, counts=1, frequencies='auto', transform='dct',
returnfrequencies=True):
"""
Generates a power spectrum from the input time-series data. Before converting to a power
spectrum, x is rescaled as
x - > (x - counts * null_hypothesis) / sqrt(counts * null_hypothesis * (1-null_hypothesis)),
where the arithmetic is element-wise, and `null_hypothesis` is a vector in (0,1).
If `null_hypothesis` is None it is set to the mean of x. If that mean is 0 or 1 then
the power spectrum returned is (0,1,1,1,...).
Parameters
----------
x: array
The time-series data to convert into a power spectrum
times: array, optional
The times associated with the data in `x`. This is not optional for the `lsp` transform
null_hypothesis: None or array, optional
Used to normalize the data, and should be the null hypothesis that is being tested for
the probability trajectory from which `x` is drawn. If `null_hypothesis` is None it is
set to the mean of x.
counts: int, optional
The number of counts per time-step, whereby all values of `x` are within [0,counts].
In the main usages for drift detection, `x` is the clickstream for a single measurement
outcome -- so `x` contains integers between 0 and the number of measurements at a (perhaps
coarse-grained) time. `counts` is this number of measurements per time.
frequencies: 'auto' or array, optional
The frequencies to generate the power spectrum for. Only relevant for transform=`lsp`.
transform: 'dct', 'dft' or 'lsp', optional
The transform to use to generate power spectrum. 'dct' is the Type-II discrete cosine transform
with an orthogonal normalization; 'dft' is the discrete Fourier transform with a unitary
normalization; 'lsp' is the floating-mean Lomb-Scargle periodogram with an orthogonal-like
normalization.
returnfrequencies: bool, optional
Whether to return the frequencies corresponding to the powers
Returns
-------
if returnfrequencies:
array or None
The frequencies corresponding to the power spectrum. None is returned if the frequencies
cannot be ascertained (when `times` is not specified).
array or None
The amplitudes, that are squared to obtain the powers. None is returned when the transform
does not generate amplitudes (this is the case for `lsp`)
array
The power spectrum
"""
if transform == 'dct' or transform == 'dft':
if transform == 'dct':
modes = dct(x, null_hypothesis, counts)
powers = modes**2
elif transform == 'dft':
modes = dft(x, null_hypothesis, counts)
powers = _np.abs(modes)**2
if returnfrequencies:
if isinstance(frequencies, str):
if times is None: freqs = None
else: freqs = fourier_frequencies_from_times(times)
else:
freqs = frequencies
return freqs, modes, powers
else:
return modes, powers
elif transform == 'lsp':
freqs, powers = lsp(x, times, frequencies, null_hypothesis, counts)
modes = None
if returnfrequencies:
return freqs, modes, powers
else:
return modes, powers
else:
raise ValueError("Input `transform` type invalid!")
| 5,342,662
|
def CalculatePercentIdentity(pair, gap_char="-"):
"""return number of idential and transitions/transversions substitutions
in the alignment.
"""
transitions = ("AG", "GA", "CT", "TC")
transversions = ("AT", "TA", "GT", "TG", "GC", "CG", "AC", "CA")
nidentical = 0
naligned = 0
ndifferent = 0
ntransitions = 0
ntransversions = 0
nunaligned = 0
for x in range(min(len(pair.mAlignedSequence1), len(pair.mAlignedSequence2))):
if pair.mAlignedSequence1[x] != gap_char and \
pair.mAlignedSequence2[x] != gap_char:
naligned += 1
if pair.mAlignedSequence1[x] == pair.mAlignedSequence2[x]:
nidentical += 1
else:
ndifferent += 1
if (pair.mAlignedSequence1[x] + pair.mAlignedSequence2[x]) in transitions:
ntransitions += 1
if (pair.mAlignedSequence1[x] + pair.mAlignedSequence2[x]) in transversions:
ntransversions += 1
else:
nunaligned += 1
return nidentical, ntransitions, ntransversions, naligned, nunaligned
| 5,342,663
|
def render_ellipse(center_x, center_y, covariance_matrix, distance_square):
"""
Renders a Bokeh Ellipse object given the ellipse center point, covariance, and distance square
:param center_x: x-coordinate of ellipse center
:param center_y: y-coordinate of ellipse center
:param covariance_matrix: NumPy array containing the covariance matrix of the ellipse
:param distance_square: value for distance square of ellipse
:return: Bokeh Ellipse object
"""
values, vectors = np.linalg.eigh(covariance_matrix)
order = values.argsort()[::-1]
values = values[order]
vectors = vectors[:, order]
angle_rads = np.arctan2(*vectors[:, 0][::-1])
# Width and height are full width (the axes lengths are thus multiplied by 2.0 here)
width, height = 2.0 * np.sqrt(values * distance_square)
ellipse = Ellipse(
x=center_x,
y=center_y,
width=width,
height=height,
angle=angle_rads,
line_width=line_width,
line_color=line_color,
fill_color=fill_color,
fill_alpha=fill_alpha
)
return ellipse
| 5,342,664
|
def flow_to_image(flow):
"""Transfer flow map to image.
Part of code forked from flownet.
"""
out = []
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
maxrad = -1
for i in range(flow.shape[0]):
u = flow[i, :, :, 0]
v = flow[i, :, :, 1]
idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
u[idxunknow] = 0
v[idxunknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(maxrad, np.max(rad))
u = u / (maxrad + np.finfo(float).eps)
v = v / (maxrad + np.finfo(float).eps)
img = compute_color(u, v)
out.append(img)
return np.float32(np.uint8(out))
| 5,342,665
|
def test_api_homeassistant_stop(hassio_handler, aioclient_mock):
"""Test setup with API HomeAssistant stop."""
aioclient_mock.post(
"http://127.0.0.1/homeassistant/stop", json={'result': 'ok'})
assert (yield from hassio_handler.stop_homeassistant())
assert aioclient_mock.call_count == 1
| 5,342,666
|
def joint_dataset(l1, l2):
"""
Create a joint dataset for two non-negative integer (boolean) arrays.
Works best for integer arrays with values [0,N) and [0,M) respectively.
This function will create an array with values [0,N*M), each value
representing a possible combination of values from l1 and l2. Essentially,
this is equivalent to zipping l1 and l2, but much faster by using the NumPy
native implementations of elementwise addition and multiplication.
:param l1: first integer vector (values within 0-n)
:type l1: numpy.array or similar
:param l2: second integer vector (values with 0-m)
:type l2: numpy.array or similar
:returns: integer vector expressing states of both l1 and l2
"""
N = np.max(l1) + 1
return l2 * N + l1
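An illustrative call (assuming `numpy as np`):

l1 = np.array([0, 1, 2, 0])  # values in [0, 3), so N = 3
l2 = np.array([0, 0, 1, 1])  # values in [0, 2)
print(joint_dataset(l1, l2))  # -> [0 1 5 3], i.e. l2 * 3 + l1, one state per (l1, l2) pair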
| 5,342,667
|
def dump_refs(args):
"""
Output a list of all repositories along with their
checked out branches and their hashes.
"""
man = load_manifest()
first = True
for (name, project) in man.projects.items():
if not first: print()
first = False
print("Project %s:" % name)
repo = project.git_repo
print(" HEAD: %s" % repo.rev_parse("HEAD"))
print(" Symbolic: %s" % repo.current_branch())
project_status(project, indent=2)
repo = get_manifest_repo()
if repo:
try:
repo_branch = repo.current_branch()
except Exception as ex:
trace.Trace("Failed to get current branch for %s: %s" % (repo, ex))
return
print()
print("Manifest repo:")
print(" HEAD: %s" % repo.rev_parse("HEAD"))
print(" Symbolic: %s" % repo_branch)
repo_status(repo,
repo_branch,
"origin/" + repo_branch,
indent=2)
check_dirty_repo(repo, indent=2)
| 5,342,668
|
def parse(asset, image_data, product):
""" Parses the GEE metadata for ODC use.
Args:
asset (str): the asset ID of the product in the GEE catalog.
image_data (dict): the image metadata to parse.
product (datacube.model.DatasetType): the product information from the ODC index.
Returns: a namedtuple of the data required by ODC for indexing.
"""
_id = str(uuid.uuid5(uuid.NAMESPACE_URL, f'EEDAI:{product.name}/{image_data["name"]}'))
creation_dt = image_data['startTime']
spatial_reference = image_data['bands'][0]['grid']\
.get('crsCode', image_data['bands'][0]['grid'].get('crsWkt'))
# Handle special GEE Infinity GeoJSON responses
image_data['geometry']['coordinates'][0] = [[float(x), float(y)]
for (x, y) \
in image_data['geometry']['coordinates'][0]]
geometry = Geometry(image_data['geometry'])
grids = [band['grid'] for band in image_data['bands']]
grids_copy = grids.copy()
grids = list(filter(lambda grid:
grids_copy.pop(grids_copy.index(grid)) \
not in grids_copy, grids))
shapes = [[grid['dimensions']['height'], grid['dimensions']['width']] \
for grid in grids]
affine_values = [list(grid['affineTransform'].values()) \
for grid in grids]
transforms = [list(Affine(affine_value[0], 0, affine_value[1],
affine_value[2], 0, affine_value[3]))\
for affine_value in affine_values]
bands = tuple(zip(product.measurements,
image_data['bands']))
metadata = Metadata(id=_id,
product=product.name,
creation_dt=creation_dt,
format='GeoTIFF',
platform=product.metadata_doc['properties'].get('eo:platform'),
instrument=product.metadata_doc['properties'].get('eo:instrument'),
from_dt=creation_dt,
to_dt=creation_dt,
center_dt=creation_dt,
asset=asset,
geometry=geometry,
shapes=shapes,
transforms=transforms,
grids=grids,
spatial_reference=spatial_reference,
path=f'EEDAI:{image_data["name"]}:',
bands=bands)
return metadata
| 5,342,669
|
def install_conan_dependencies(build_type: str, *additional_args: Iterable[str]):
"""Install dependent Conan packages.
Args:
build_type (str): Type of build.
additional_args (List[str]): Additional command line arguments.
"""
build_path = make_build_path(build_type)
os.makedirs(build_path, exist_ok=True)
command = (
[
"conan",
"install",
"--build",
"missing",
"-s",
"build_type=" + build_type,
"-o",
"requirements_for_tests=True",
]
+ list(additional_args)
+ [
"../..",
]
)
print(f"> run command: {command}")
result = subprocess.run(command, check=False, cwd=build_path)
exit(result.returncode)
| 5,342,670
|
def scale(pix, pixMax, floatMin, floatMax):
""" scale takes in
pix, the CURRENT pixel column (or row)
pixMax, the total # of pixel columns
floatMin, the min floating-point value
floatMax, the max floating-point value
scale returns the floating-point value that
corresponds to pix
"""
return (pix / pixMax) * (floatMax - floatMin) + floatMin
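A worked example, mapping pixel column 50 of 100 onto the range [-2.0, 2.0]:

print(scale(50, 100, -2.0, 2.0))  # (50/100) * (2.0 - (-2.0)) + (-2.0) = 0.0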
| 5,342,671
|
def most_distinct(df):
"""
:param df: data frame
:return:
"""
headers = df.columns.values
dist_list = [] # list of distinct values per list
for idx, col_name in enumerate(headers):
col = df[col_name]
col_list = col.tolist()
# if len(col_list) == 0:
# dist_list.append(-1)
# continue
avg_token_size = sum([len(str(a)) for a in col_list]) * 1.0 / len(col_list)
if avg_token_size < 4:
dist_list.append(-1)
else:
nums = get_numerics_from_list(col_list)
if nums is None:
dist_list.append(len(set(col_list)))
else:
dist_list.append(-1)
max_num = max(dist_list)
if max_num == -1 or max_num == 0:
return -1
for i, c in enumerate(dist_list):
if c == max_num:
return i
| 5,342,672
|
def walk_trees(store, tree_ids,prune_identical=False):
"""Recursively walk all the entries of N trees.
Iteration is depth-first pre-order, as in e.g. os.walk.
:param store: An ObjectStore for looking up objects.
:param tree_ids: iterable of SHAs for N trees
:param prune_identical: If True, identical subtrees will not be walked.
:return: Iterator over tuples containing N TreeEntry objects for each of the entries
in the trees and their subtrees recursively. If an entry exists in one
tree but not the other, the other entry will have all attributes set
to None. If neither entry's path is None, they are guaranteed to
match.
"""
# This could be fairly easily generalized to >2 trees if we find a use
# case.
modes= [tree_id and stat.S_IFDIR or None for tree_id in tree_ids]
todo = [[TreeEntry(b'', mode, tree_id) for mode,tree_id in zip(modes,tree_ids)]]
while todo:
entries = todo.pop()
is_trees = [_is_tree(entry) for entry in entries]
if prune_identical and all(is_trees) and all_eq(entries):
continue
trees = [is_tree and store[entry.sha] or None for is_tree,entry in zip(is_trees,entries)]
path = first_nonempty([entry.path for entry in entries])
todo.extend(reversed(_merge_entries(path, trees)))
yield tuple(entries)
| 5,342,673
|
def make_set(value):
"""
Takes a value and turns it into a set
!!!! This is important because set(string) will parse a string to
individual characters vs. adding the string as an element of
the set i.e.
x = 'setvalue'
set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'}
make_set(x) = {'setvalue'}
or use set([x,]) by adding string as first item in list.
:param value:
:return:
"""
if isinstance(value, list):
value = set(value)
elif not isinstance(value, set):
value = set([value])
return value
| 5,342,674
|
def import_from_file(pipe, src):
"""
Basic example how to import from file or file-like object opened in binary mode
"""
if not hasattr(src, 'read'):
src = open(src, 'rb')
shutil.copyfileobj(src, pipe, 65535)
| 5,342,675
|
def _get_parent_cache_dir_url():
"""Get parent cache dir url from `petastorm.spark.converter.parentCacheDirUrl`
We can only set the url config once.
"""
global _parent_cache_dir_url # pylint: disable=global-statement
conf_url = _get_spark_session().conf \
.get(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, None)
if conf_url is None:
raise ValueError(
"Please set the spark config {}.".format(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF))
conf_url = normalize_dir_url(conf_url)
_check_parent_cache_dir_url(conf_url)
_parent_cache_dir_url = conf_url
logger.info(
'Read %s %s', SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, _parent_cache_dir_url)
return _parent_cache_dir_url
| 5,342,676
|
def make_variable(data, variances=None, **kwargs):
"""
Make a Variable with default dimensions from data
while avoiding copies beyond what sc.Variable does.
"""
if isinstance(data, (list, tuple)):
data = np.array(data)
if variances is not None and isinstance(variances, (list, tuple)):
variances = np.array(variances)
if isinstance(data, np.ndarray):
dims = ['x', 'y'][:np.ndim(data)]
return sc.array(dims=dims, values=data, variances=variances, **kwargs)
return sc.scalar(data, **kwargs)
| 5,342,677
|
def print_emails(emails: list):
"""
Prints the email info for emails in a list.
:param emails: A list of emails to print.
"""
counts = collections.Counter([email.subject for email in emails])
size = bytesto(sum(email.size for email in emails), "m")
for email in emails:
print(f"{email.subject} - {email.sender} - {email.size} bytes")
print(f"Total size: {size} MB")
| 5,342,678
|
def run_energy(save_figure: bool = False):
"""Run energy workflow."""
# define a molecule
mol = Molecule()
# make it by hand
mol.add_atom("C", 0, 0, 0)
mol.add_atom("O", 1.2, 0, 0)
# or read it from existing file
# mol.readXYZfile('CO.xyz')
# get a DFTGWBSE object
dft = DFTGWBSE(mol)
# change basis sets to a smaller one
dft.options.basisset = 'def2-svp'
dft.options.auxbasisset = 'aux-def2-svp'
# run for the molecule
# dft.run(mol)
# only needed, if no run was performed but an existing HDF5 is read
dft.mol.read_orb('pyvotca/examples/example.orb')
# Getting the plotting functions
viz = Visualization(dft.mol, save_figure=save_figure)
# plotting QP corrections
# viz.plot_qp_corrections()
# plotting absorption spectrum
viz.plot_absorption_gaussian()
| 5,342,679
|
def create_subtasks(task, qs, chunk_size, countdown=None, task_args=None):
"""
Splits a task depending on a queryset into a bunch of subtasks of the
specified chunk_size, passing a chunked queryset and optional additional
arguments to each."""
if task_args is None:
task_args = ()
job = group([
task.subtask(args=(chunk,) + task_args)
for chunk in chunked(qs, chunk_size)
])
if countdown is not None:
job.apply_async(countdown=countdown)
else:
job.apply_async()
| 5,342,680
|
def gh_event_repository(db, repo, payload, actor):
"""Process GitHub RepositoryEvent (with commits)
https://developer.github.com/v3/activity/events/types/#repositoryevent
:param db: Database to store repository data
:type db: ``flask_sqlalchemy.SQLAlchemy``
:param repo: Repository related to event
:type repo: ``repocribro.models.Repository``
:param payload: Data about repository and action
:type payload: dict
:param actor: Actor doing the event
:type actor: dict
"""
action = payload['action']
if action == 'privatized':
repo.private = True
repo.visibility_type = Repository.VISIBILITY_PRIVATE
elif action == 'publicized':
repo.private = False
repo.visibility_type = Repository.VISIBILITY_PUBLIC
elif action == 'deleted':
# TODO: consider some signalization of not being @GitHub anymore
repo.webhook_id = None
repo.visibility_type = Repository.VISIBILITY_PRIVATE
| 5,342,681
|
def dt642epoch(dt64):
"""
Convert numpy.datetime64 array to epoch time
(seconds since 1/1/1970 00:00:00)
Parameters
----------
dt64 : numpy.datetime64
Single or array of datetime64 object(s)
Returns
-------
time : float
Epoch time (seconds since 1/1/1970 00:00:00)
"""
return dt64.astype('datetime64[ns]').astype('float') / 1e9
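A quick check (assuming `numpy as np`):

print(dt642epoch(np.datetime64('1970-01-02T00:00:00')))  # -> 86400.0 (one day after the epoch)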
| 5,342,682
|
def rpn_losses(anchor_labels, anchor_boxes, label_logits, box_logits):
"""
Calculate the rpn loss for one FPN layer for a single image.
    The ground truth (GT) anchor labels and anchor boxes have been preprocessed to fit
    the dimensions of the FPN feature map. The GT boxes are encoded as in the Faster R-CNN paper
    https://arxiv.org/abs/1506.01497, page 5.
Args:
anchor_labels: GT anchor labels, H_feature x W_feature x NA
anchor_boxes: GT boxes for each anchor, H_feature x W_feature x NA x 4, encoded
label_logits: label logits from the rpn head, H_feature x W_feature x NA
box_logits: box logits from the rpn head, H_feature x W_feature x NA x 4
Returns:
label_loss, box_loss
"""
with tf.device('/cpu:0'):
valid_mask = tf.stop_gradient(tf.not_equal(anchor_labels, -1))
pos_mask = tf.stop_gradient(tf.equal(anchor_labels, 1))
nr_valid = tf.stop_gradient(tf.count_nonzero(valid_mask, dtype=tf.int32), name='num_valid_anchor')
nr_pos = tf.identity(tf.count_nonzero(pos_mask, dtype=tf.int32), name='num_pos_anchor')
        # nr_pos is guaranteed >0 in C4, but in FPN even nr_valid could be 0.
valid_anchor_labels = tf.boolean_mask(anchor_labels, valid_mask)
valid_label_logits = tf.boolean_mask(label_logits, valid_mask)
# with tf.name_scope('label_metrics'):
# valid_label_prob = tf.nn.sigmoid(valid_label_logits)
# summaries = []
# with tf.device('/cpu:0'):
# for th in [0.5, 0.2, 0.1]:
# valid_prediction = tf.cast(valid_label_prob > th, tf.int32)
# nr_pos_prediction = tf.reduce_sum(valid_prediction, name='num_pos_prediction')
# pos_prediction_corr = tf.count_nonzero(
# tf.logical_and(
# valid_label_prob > th,
# tf.equal(valid_prediction, valid_anchor_labels)),
# dtype=tf.int32)
# placeholder = 0.5 # A small value will make summaries appear lower.
# recall = tf.cast(tf.truediv(pos_prediction_corr, nr_pos), tf.float32)
# recall = tf.where(tf.equal(nr_pos, 0), placeholder, recall, name='recall_th{}'.format(th))
# precision = tf.cast(tf.truediv(pos_prediction_corr, nr_pos_prediction), tf.float32)
# precision = tf.where(tf.equal(nr_pos_prediction, 0),
# placeholder, precision, name='precision_th{}'.format(th))
# summaries.extend([precision, recall])
# add_moving_summary(*summaries)
# Per-level loss summaries in FPN may appear lower due to the use of a small placeholder.
# But the total RPN loss will be fine. TODO make the summary op smarter
placeholder = 0.
label_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(valid_anchor_labels, tf.float32), logits=valid_label_logits)
label_loss = tf.reduce_sum(label_loss) * (1. / cfg.RPN.BATCH_PER_IM)
label_loss = tf.where(tf.equal(nr_valid, 0), placeholder, label_loss, name='label_loss')
pos_anchor_boxes = tf.boolean_mask(anchor_boxes, pos_mask)
pos_box_logits = tf.boolean_mask(box_logits, pos_mask)
delta = 1.0 / 9
box_loss = tf.losses.huber_loss(
pos_anchor_boxes, pos_box_logits, delta=delta,
reduction=tf.losses.Reduction.SUM) / delta
box_loss = box_loss * (1. / cfg.RPN.BATCH_PER_IM)
box_loss = tf.where(tf.equal(nr_pos, 0), placeholder, box_loss, name='box_loss')
# add_moving_summary(label_loss, box_loss, nr_valid, nr_pos)
return [label_loss, box_loss]
| 5,342,683
|
def calc_val_resize_value(input_image_size=(224, 224),
resize_inv_factor=0.875):
"""
Calculate image resize value for validation subset.
Parameters:
----------
    input_image_size : tuple of 2 int
        Spatial size of the input image.
resize_inv_factor : float
Resize inverted factor.
Returns:
-------
int
Resize value.
"""
if isinstance(input_image_size, int):
input_image_size = (input_image_size, input_image_size)
resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))
return resize_value
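
# A minimal usage sketch for calc_val_resize_value, assuming `math` is imported
# for the function above; 224 / 0.875 rounds up to 256 and 299 / 0.875 to 342.
import math

assert calc_val_resize_value((224, 224)) == 256
assert calc_val_resize_value(299) == 342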
| 5,342,684
|
def fetch_http(url, location):
"""
    Return a `Response` object built by fetching the content at an HTTP/HTTPS `url`,
    saving the content to a file at `location`.
"""
r = requests.get(url)
with open(location, 'wb') as f:
f.write(r.content)
content_type = r.headers.get('content-type')
size = r.headers.get('content-length')
size = int(size) if size else None
resp = Response(location=location, content_type=content_type, size=size, url=url)
return resp
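
# A minimal usage sketch for fetch_http, assuming `requests` and the custom
# `Response` class used above are importable here; the URL is illustrative
# only and the call performs a real network request.
import os
import tempfile

with tempfile.NamedTemporaryFile(suffix='.html', delete=False) as tmp:
    target = tmp.name
fetch_http('https://example.com/', target)
print(target, os.path.getsize(target))  # saved file path and its size in bytes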
| 5,342,685
|
def youtube_dl(url, uid):
"""
Modd i'r defnyddwyr defnyddio dolen yn lle ffeiliau.
Lawrlwytho ffeil o gwasanaeth streamio fel YouTube fel wav efo enw uuid4
rhedeg transcribe_audio() yn syth
"""
cmd = "youtube-dl -x --audio-format wav " + url + \
" -o data/" + uid + ".wav"
os.system(cmd)
cmd = "ffmpeg -i data/"+uid + \
".wav -acodec pcm_s16le -ar 16000 -ac 1 data/" + uid+"_temp.wav"
os.system(cmd)
os.rename("data/"+uid+"_temp.wav", "data/"+uid+".wav")
transcribe_audio("data/"+uid+".wav")
transaction.commit()
| 5,342,686
|
def mat_normalize(mx):
"""Normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -0.5).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx).dot(r_mat_inv)
return mx
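
# A minimal usage sketch for mat_normalize, assuming the aliases used above:
# numpy as np and scipy.sparse as sp. Symmetric normalisation of an all-ones
# 2x2 matrix gives 0.5 everywhere.
import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.ones((2, 2)))
print(mat_normalize(adj).toarray())  # [[0.5 0.5]
                                     #  [0.5 0.5]]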
| 5,342,687
|
def runjcast(args):
"""
main look for jcast flow.
:param args: parsed arguments
:return:
"""
# Get timestamp for out files
now = datetime.datetime.now()
write_dir = os.path.join(args.out, 'jcast_' + now.strftime('%Y%m%d%H%M%S'))
os.makedirs(write_dir, exist_ok=True)
# Main logger setup
main_log = logging.getLogger('jcast')
main_log.propagate = False
main_log.setLevel(logging.INFO)
# create file handler which logs even debug messages
fh = logging.FileHandler(os.path.join(write_dir, 'jcast_main.log'))
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
main_log.addHandler(fh)
#
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
ch.setFormatter(formatter)
main_log.addHandler(ch)
main_log.info(args)
main_log.info(__version__)
#
# Open the rMATS output file (MXE) here, rename the columns
#
assert os.path.exists(os.path.join(args.rmats_folder, 'MXE.MATS.JC.txt')), 'rMATS files not found, check directory.'
rmats_results = RmatsResults(rmats_dir=args.rmats_folder)
    #
    # Read the gtf file using the gtfparse package.
    # Then write it as a pandas data frame.
#
gtf = ReadAnnotations(args.gtf_file)
gtf.read_gtf()
#
# Read genome file into memory
#
genome = ReadGenome(args.genome)
#
# Model read count cutoff.
# TODO: move this to a separate class
#
if args.model:
main_log.info('The -m flag is set. The modeled read count will override -r --read values.')
# Make a numpy array of all junction SJC sum counts
rmats_results.get_junction_count_array()
pt, gmm, min_count = model.gaussian_mixture(sum_sjc_array=rmats_results.sum_sjc_array)
# Plot out the model
model.plot_model(sum_sjc_array=rmats_results.sum_sjc_array,
pt=pt,
gmm=gmm,
min_count=min_count,
write_dir=write_dir,
filename='model',
)
# If the m flag is not set, use the r argument value as min count
else:
min_count = args.read
#
# Main loop through every line of each of the five rMATS files to make junction object, then translate them
#
for rma in [rmats_results.rmats_mxe,
rmats_results.rmats_se,
rmats_results.rmats_ri,
rmats_results.rmats_a5ss,
rmats_results.rmats_a3ss,
]:
junctions = [Junction(**rma.iloc[i].to_dict()) for i in range(len(rma))]
translate_one_partial = partial(_translate_one,
gtf=gtf,
genome=genome,
args=args,
write_dir=write_dir,
pred_bound=min_count,
)
#
# Concurrent futures
#
# import concurrent.futures
# with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()-1) as pool:
# for i, f in enumerate(tqdm.tqdm(pool.map(
# translate_one_partial,
# junctions,
# ),
# total=len(junctions),
# desc='Processing {0} Junctions'.format(rma.jxn_type[0]),
# )):
# main_log.info('>>>>>> Doing {0} junction {1} for gene {2} {3}'.format(junctions[i].junction_type,
# junctions[i].name,
# junctions[i].gene_symbol,
# junctions[i].gene_id,
# ))
# main_log.info(f)
#
# Single threaded for-loop
#
for jx in tqdm.tqdm(junctions,
total=len(junctions),
desc='Processing {0} Junctions'.format(rma.jxn_type[0]),
):
main_log.info('>>>>>> Doing {0} junction {1} for gene {2} {3}'.format(jx.junction_type,
jx.name,
jx.gene_symbol,
jx.gene_id,
))
main_log.info(translate_one_partial(jx))
return True
| 5,342,688
|
def user_syntax_error(e, source_code):
"""Returns a representation of the syntax error for human consumption.
This is only meant for small user-provided strings. For input files,
prefer the regular Python format.
Args:
e: The SyntaxError object.
source_code: The source code.
Returns:
A multi-line error message, where the first line is the summary, and the
following lines explain the error in more detail.
"""
summary = 'Failed to parse Python-like source code ({msg}).'.format(
msg=e.msg or '<unknown reason>')
if e.text is None:
# Only output the source code.
return '\n'.join([summary, _indent(source_code)])
# Alternatively, we could use the middle two lines from
# traceback.format_exception_only(SyntaxError, e), but it isn't clear that
# this is an improvement in terms of maintainability. (e.g. we do not then
# control the indent, and if the format changes in the future the output
# becomes nonsense).
error_information = '\n'.join([
e.text.rstrip('\r\n'), # \n is added by ast.parse but not exec/eval.
' ' * (e.offset - 1) + '^', # note: offset is 1-based.
])
if '\n' in source_code:
return '\n'.join([
summary,
'',
'Source:',
_indent(source_code),
'',
'Location:',
_indent(error_information),
])
else:
return '\n'.join([summary, _indent(error_information)])
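
# A minimal usage sketch for user_syntax_error; `_indent` is a helper of the
# same module that is not shown here, so this only runs where it is defined.
bad_source = "f(x,"
try:
    compile(bad_source, '<user input>', 'eval')
except SyntaxError as err:
    print(user_syntax_error(err, bad_source))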
| 5,342,689
|
def check_comment(comment, changed):
""" Check the commit comment and return True if the comment is
acceptable and False if it is not."""
sections = re.match(COMMIT_PATTERN, comment)
if sections is None:
print(f"The comment \"{comment}\" is not in the recognised format.")
else:
indicator = sections.group(1)
if indicator == "M":
# Allow modification comments to have practically any format
return True
elif indicator == "A" or indicator == "P":
if not changed:
print(
"You have indicated that you have added or removed a rule, but no changes were initially noted by "
"the repository.")
else:
address = sections.group(4)
if not valid_url(address):
print("Unrecognised address \"{address}\".".format(address=address))
else:
# The user has changed the subscription and has written a suitable comment
# message with a valid address
return True
print()
return False
| 5,342,690
|
def test_remove_group_dependency(job_scripts):
"""add and remove jobgroup dependency."""
jg1 = pyani_jobs.JobGroup("1d-sweep", "cat", arguments=job_scripts[0].params)
jg2 = pyani_jobs.JobGroup("2d-sweep", "myprog", arguments=job_scripts[1].params)
jg2.add_dependency(jg1)
dep = jg2.dependencies[0]
jg2.remove_dependency(dep)
assert 0 == len(jg2.dependencies)
| 5,342,691
|
def GetBankTaskSummary(bank_task):
""" Summarizes the bank task
params: bank_task = value of the object of type bank_task_t
returns: String with summary of the type.
"""
format_str = "{0: <#020x} {1: <16d} {2: <#020x} {3: <16d} {4: <16d} {5: <16d} {6: <16d} {7: <16d}"
out_string = format_str.format(bank_task, bank_task.bt_proc_persona.pid, bank_task.bt_ledger, unsigned(bank_task.bt_elem.be_refs), unsigned(bank_task.bt_elem.be_made), bank_task.bt_proc_persona.persona_id, bank_task.bt_proc_persona.uid, bank_task.bt_proc_persona.gid)
#if DEVELOPMENT
format_str = "{0: <#020x} {1: <20s}"
if hasattr(bank_task.bt_elem, 'be_task'):
out_string += " " + format_str.format(bank_task.bt_elem.be_task, GetProcNameForTask(bank_task.bt_elem.be_task))
#endif
return out_string
| 5,342,692
|
def deploy(environment):
"""Deploy to defined environment."""
token = get_token()
if not token:
click.echo("no token found - running configure")
configure()
token = get_token()
click.confirm(f"Are you sure you want to deploy {environment}?", abort=True)
if environment == "production":
result = deploy_production(token)
elif environment == "staging":
result = deploy_staging(token)
if result.status_code != 204:
click.echo(
f"There was an error deploying {environment}, please check your token."
)
else:
click.echo(f"Successfully started deployment for {environment}")
| 5,342,693
|
def main():
"""Main Function Refrences all other functions and accepts user input"""
db.connect()
menu.main_menu()
while True:
command_main = input("Please choose a menu: ").rstrip()
if command_main == "script":
menu.display_menu()
while True:
command_script = input("Enter Command: ").rstrip()
if command_script == "main":
misc_func.powershell()
misc_func.remove_bom()
db.logs()
elif command_script == "delete":
misc_func.delete_disabled()
elif command_script == "refresh":
misc_func.refresh_list()
elif command_script == "disabled":
misc_func.get_disabled()
elif command_script == "read":
misc_func.read_text()
elif command_script == "back":
menu.main_menu()
break
continue
elif command_main == "query":
menu.display_query_menu()
while True:
command_query = input("Choose a query: ").rstrip()
if command_query == "all":
print("Filler Text")
elif command_query == "os":
misc_func.format_select_os()
elif command_query == "delete":
db.delete_computer()
elif command_query == "test":
db.test()
elif command_query == "back":
menu.main_menu()
break
continue
elif command_main == "exit":
break
else:
print("Not a Valid Command")
menu.main_menu()
db.close()
print("The application has closed succesfully")
| 5,342,694
|
def angle(o1,o2):
"""
Find the angles between two DICOM orientation vectors
"""
o1 = np.array(o1)
o2 = np.array(o2)
o1a = o1[0:3]
o1b = o1[3:6]
o2a = o2[0:3]
o2b = o2[3:6]
norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)
norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)
dot_a = np.dot(o1a,o2a) / norm_a
dot_b = np.dot(o1b,o2b) / norm_b
if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:
dot_a = 1.0
if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:
dot_b = 1.0
angle_a = np.arccos(dot_a) * (180.0 / np.pi)
angle_b = np.arccos(dot_b) * (180.0 / np.pi)
return (angle_a, angle_b)
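
# A minimal usage sketch for angle(), assuming numpy as np. Two DICOM
# ImageOrientationPatient vectors: an axial orientation compared with itself
# gives (0, 0) degrees, and swapping the row and column axes gives roughly
# (90, 90).
import numpy as np

axial = [1, 0, 0, 0, 1, 0]
swapped = [0, 1, 0, 1, 0, 0]
print(angle(axial, axial))    # (0.0, 0.0)
print(angle(axial, swapped))  # approximately (90.0, 90.0)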
| 5,342,695
|
def get_project_id_v3(user_section='user'):
"""Returns a project ID."""
r = authenticate_v3_config(user_section, scoped=True)
return r.json()["token"]["project"]["id"]
| 5,342,696
|
def get_platform_arches(pkgs_info, pkg_name):
"""."""
package_info = get_package_info(pkgs_info, pkg_name)
platforms_info = package_info.get('platforms', {})
platform_arches = platforms_info.get('arches', [])
return platform_arches
| 5,342,697
|
def get_preview_images_by_proposal(proposal):
"""Return a list of preview images available in the filesystem for
the given ``proposal``.
Parameters
----------
proposal : str
The five-digit proposal number (e.g. ``88600``).
Returns
-------
preview_images : list
A list of preview images available in the filesystem for the
given ``proposal``.
"""
preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, 'jw{}'.format(proposal), '*'))
preview_images = [os.path.basename(preview_image) for preview_image in preview_images]
return preview_images
| 5,342,698
|
def unformat_number(new_str: str, old_str: Optional[str], type_: str) -> str:
"""Undoes some of the locale formatting to ensure float(x) works."""
ret_ = new_str
if old_str is not None:
if type_ in ("int", "uint"):
new_str = new_str.replace(",", "")
new_str = new_str.replace(".", "")
ret_ = new_str
else:
end_comma = False
if new_str.endswith(",") or new_str.endswith("."):
                # If it ends with a comma or period, remember it
end_comma = True
ret_ = new_str.replace(",", "")
ret_ = ret_.replace(".", "")
if end_comma:
ret_ = ret_ + "."
# else:
# comma_pos = old_str.find(".")
# if comma_pos > -1:
print("Desformateando", new_str, ret_)
# else:
# pos_comma = old_str.find(".")
# if pos_comma > -1:
# if pos_comma > new_str.find("."):
# new_str = new_str.replace(".", "")
# ret_ = new_str[0:pos_comma] + "." + new_str[pos_comma:]
# print("l2", ret_)
return ret_
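
# A minimal usage sketch for unformat_number, following the behaviour of the
# function exactly as written above (the non-integer branch also prints a
# debug line).
assert unformat_number("1.234", None, "double") == "1.234"  # no previous value: returned unchanged
assert unformat_number("1.234", "1.23", "int") == "1234"    # int types drop every separator
assert unformat_number("12,", "12", "double") == "12."      # a trailing comma is kept as "."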
| 5,342,699
|