| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def hashed_class_mix_score256(cycle_hash: bytes, identifier: bytes, ip: str, ip_bytes: bytearray) -> int:
"""
Nyzo score computed from a hash of the IP's first three bytes plus the low half of the last byte, which effectively reorders the various c-classes and their gaps.
The score is then completed with the high half of the last IP byte.
That half byte is shuffled through a permutation map built from the cycle hash, so that IPs at the start or end of a block do not get better odds.
Should be similar to first picking a single random class (c-class + low half of the last byte) from the different classes, then picking a single block prefix within that class.
"""
score = sys.maxsize
if ip == '':
return score
ip_bytes = bytearray(socket.inet_aton(ip))
# seed = cycle_hash + ip_bytes[:3] + (ip_bytes[3] & 15).to_bytes(1, byteorder='big') # one c-class + last 4 bits = one seed
ip_bytes[3] = ip_bytes[3] & 15 # Keep only the low 4 bits of the last byte (clear the high nibble).
seed = cycle_hash + ip_bytes
hashed_c = sha256(seed).digest()
score = 0
for i in range(32):
# Do we need all 32 bytes? more information than the ip entropy we fed (3.5 bytes). Way faster with only 16? => 30% gain
score += abs(cycle_hash[i] - hashed_c[i])
# score = sum(abs(r - h) for r,h in zip(cycle_hash, hashed_c)) # Slightly slower
score *= 16
# Up to this point, the score is the same for all IPs of the same class
score += abs(SHUFFLE_MAP[ip_bytes[3]//16] - cycle_hash[0]//16)
# shuffle map so lower and highest ips parts do not get more odds
return score
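A minimal usage sketch for the score function above. The module-level names it relies on (sys, socket, sha256, SHUFFLE_MAP) are not shown in the snippet, so the imports and the 16-entry SHUFFLE_MAP below are placeholders for illustration only; in the real code the map is presumably a permutation derived from the cycle hash.
import sys
import socket
from hashlib import sha256
SHUFFLE_MAP = list(range(16))  # hypothetical stand-in for the cycle-hash-derived permutation
cycle_hash = sha256(b"example cycle seed").digest()
score = hashed_class_mix_score256(cycle_hash, b"", "192.168.1.42", bytearray())
print(score)  # an integer score; how it is ranked is outside this snippet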
| 22,800
|
def test_configuration_files(SystemInfo, File):
"""
Test whether the configuration settings were added
"""
config_file_path = ''
if SystemInfo.distribution == 'ubuntu':
config_file_path = '/opt/mongodb/mms/conf/conf-mms.properties'
config_file = File(config_file_path)
assert config_file.contains('mms.centralUrl=http://localhost:8080')
assert config_file.contains('mms.fromEmailAddr=foo@bar.org')
assert config_file.contains('mms.replyToEmailAddr=foo@bar.org')
assert config_file.contains('mms.adminEmailAddr=foo@bar.org')
assert config_file.contains('mms.emailDaoClass=com.xgen.svc.core.dao.email.JavaEmailDao')
assert config_file.contains('mms.mail.transport=smtp')
assert config_file.contains('mms.mail.hostname=localhost')
assert config_file.contains('mms.mail.port=25')
| 22,801
|
def test_pickle():
"""This test refers to PR #4 which adds pickle support."""
original_table = Table({'a': [1, 2, 3], 'b': [4, 5, 6]})
serialized_table = pickle.dumps(original_table)
deserialized_table = pickle.loads(serialized_table)
assert original_table.keys == deserialized_table.keys
assert np.array_equal(original_table.data, deserialized_table.data)
assert np.array_equal(original_table.index, deserialized_table.index)
| 22,802
|
def processShowListFile(suppliedFile):
"""Search for each show entry in file line by line."""
with open(suppliedFile) as f:
for line in f:
lineContents = line.split()
if len(lineContents) < 3:
try:
print "%s entry is incorrectly formatted. Likely \
missing season or episode numbers." % lineContents[1]
except IndexError:
print "Catastrophic error reading line from %s" % suppliedFile
else:
try: # catch a missing season and insert '00' season place holder
lineContents[2]
except IndexError:
lineContents.insert(1, '00')
# append a zero before single digit episode and season numbers
for i in range(1, 3):
if len(lineContents[i]) < 2:
lineContents[i] = '0' + lineContents[i]
title, season, episode = lineContents[0:3]
singleEntry(title, season, episode)
time.sleep(60)
| 22,803
|
def distance_vinchey(f, a, start, end):
"""
Uses Vincenty formula for distance between two Latitude/Longitude points
(latitude,longitude) tuples, in numeric degrees. f,a are ellipsoidal parameters
Returns the distance (m) between two geographic points on the ellipsoid and the
forward and reverse azimuths between these points. Returns ( s, alpha12, alpha21 ) as a tuple
"""
# Convert into notation from the original paper
# http://www.anzlic.org.au/icsm/gdatum/chapter4.html
#
# Vincenty's Inverse formulae
# Given: latitude and longitude of two points (phi1, lembda1 and phi2, lembda2)
phi1 = math.radians(start[0]); lembda1 = math.radians(start[1]);
phi2 = math.radians(end[0]); lembda2 = math.radians(end[1]);
if (abs( phi2 - phi1 ) < 1e-8) and ( abs( lembda2 - lembda1) < 1e-8 ):
return 0.0, 0.0, 0.0
two_pi = 2.0*math.pi
b = a * (1.0 - f)
TanU1 = (1-f) * math.tan( phi1 )
TanU2 = (1-f) * math.tan( phi2 )
U1 = math.atan(TanU1)
U2 = math.atan(TanU2)
lembda = lembda2 - lembda1
last_lembda = -4000000.0 # an impossible value
omega = lembda
# Iterate the following equations, until there is no significant change in lembda
while ( last_lembda < -3000000.0 or lembda != 0 and abs( (last_lembda - lembda)/lembda) > 1.0e-9 ) :
sqr_sin_sigma = pow( math.cos(U2) * math.sin(lembda), 2) + \
pow( (math.cos(U1) * math.sin(U2) - \
math.sin(U1) * math.cos(U2) * math.cos(lembda) ), 2 )
Sin_sigma = math.sqrt( sqr_sin_sigma )
Cos_sigma = math.sin(U1) * math.sin(U2) + math.cos(U1) * math.cos(U2) * math.cos(lembda)
sigma = math.atan2( Sin_sigma, Cos_sigma )
Sin_alpha = math.cos(U1) * math.cos(U2) * math.sin(lembda) / math.sin(sigma)
alpha = math.asin( Sin_alpha )
Cos2sigma_m = math.cos(sigma) - (2 * math.sin(U1) * math.sin(U2) / pow(math.cos(alpha), 2) )
C = (f/16) * pow(math.cos(alpha), 2) * (4 + f * (4 - 3 * pow(math.cos(alpha), 2)))
last_lembda = lembda
lembda = omega + (1-C) * f * math.sin(alpha) * (sigma + C * math.sin(sigma) * \
(Cos2sigma_m + C * math.cos(sigma) * (-1 + 2 * pow(Cos2sigma_m, 2) )))
u2 = pow(math.cos(alpha),2) * (a*a-b*b) / (b*b)
A = 1 + (u2/16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))
B = (u2/1024) * (256 + u2 * (-128+ u2 * (74 - 47 * u2)))
delta_sigma = B * Sin_sigma * (Cos2sigma_m + (B/4) * \
(Cos_sigma * (-1 + 2 * pow(Cos2sigma_m, 2) ) - \
(B/6) * Cos2sigma_m * (-3 + 4 * sqr_sin_sigma) * \
(-3 + 4 * pow(Cos2sigma_m,2 ) )))
s = b * A * (sigma - delta_sigma)
alpha12 = math.atan2( (math.cos(U2) * math.sin(lembda)), \
(math.cos(U1) * math.sin(U2) - math.sin(U1) * math.cos(U2) * math.cos(lembda)))
alpha21 = math.atan2( (math.cos(U1) * math.sin(lembda)), \
(-math.sin(U1) * math.cos(U2) + math.cos(U1) * math.sin(U2) * math.cos(lembda)))
if ( alpha12 < 0.0 ) :
alpha12 = alpha12 + two_pi
if ( alpha12 > two_pi ) :
alpha12 = alpha12 - two_pi
alpha21 = alpha21 + two_pi / 2.0
if ( alpha21 < 0.0 ) :
alpha21 = alpha21 + two_pi
if ( alpha21 > two_pi ) :
alpha21 = alpha21 - two_pi
return s, alpha12, alpha21
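A quick sanity check of the routine above. The WGS84 ellipsoid parameters are an assumption (the snippet leaves f and a to the caller), and the expected distance is only an approximate figure.
import math
f, a = 1 / 298.257223563, 6378137.0  # assumed WGS84 flattening and semi-major axis in metres
s, fwd, rev = distance_vinchey(f, a, (48.8566, 2.3522), (51.5074, -0.1278))  # Paris -> London
print(round(s / 1000.0, 1), "km")  # roughly 340-345 km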
| 22,804
|
def plotCTbs(bcount, idx):
"""
Plots data points, individual, and mean curve of both control and treatment group for a bootstrapping sample
:param bcount: index of bootstrapping sample
:param idx: index of the selection
"""
fdgrp0tme_arr = np.array(fulldataS[bcount][fulldataS[bcount].grp == 0]["tme"])
fdgrp0sel_arr = np.array(fulldataS[bcount][fulldataS[bcount].grp == 0][sel])
fdgrp1tme_arr = np.array(fulldataS[bcount][fulldataS[bcount].grp == 1]["tme"])
fdgrp1sel_arr = np.array(fulldataS[bcount][fulldataS[bcount].grp == 1][sel])
plt.figure(figsize=figSize)
if not plotSMEMeanOnly:
for g0 in range(len(grp0)):
tmeIdx = np.where(grp0_f == grp0[g0])
plt.plot(fdgrp0tme_arr[tmeIdx], fdgrp0sel_arr[:,idx][tmeIdx], color=cmap1(0), marker='o', linestyle='')
plt.plot(iplT, ys0vHatbs[bcount][idx][g0], color=cmap1(0), linestyle='dashed')
for g1 in range(len(grp1)):
tmeIdx = np.where(grp1_f == grp1[g1])
plt.plot(fdgrp1tme_arr[tmeIdx], fdgrp1sel_arr[:,idx][tmeIdx], color=cmap1(1), marker='o', linestyle='')
plt.plot(iplT, ys1vHatbs[bcount][idx][g1], color=cmap1(len(sel)), linestyle='dashed')
plt.plot(iplT, ys0mubs[bcount][idx], lw=3, color=cmap1(0))
plt.plot(iplT, ys1mubs[bcount][idx], lw=3, color=cmap1(1))
plt.show()
| 22,805
|
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
# unpack x1, y1, x2, y2 and the confidence scores
dets = np.array(dets)
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
# area of every detection box
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# sort by confidence score in descending order
order = scores.argsort()[::-1]
keep = []
# the set of boxes that survive suppression
while order.size > 0:
i = order[0]
keep.append(i) # keep the highest-scoring remaining box
# intersection rectangle: top-left and bottom-right corners
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
# intersection area (zero when the boxes do not overlap)
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
# IoU = intersection / (area1 + area2 - intersection)
ovr = inter / (areas[i] + areas[order[1:]] - inter)
# keep only boxes whose IoU with the current box is below the threshold
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
# ovr is one element shorter than order, so shift the surviving indices by one
return keep
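A small worked example of the NMS baseline above: two heavily overlapping boxes and one distant box, with an IoU threshold of 0.5.
import numpy as np
dets = [[10, 10, 50, 50, 0.9],
[12, 12, 52, 52, 0.8],    # IoU with the first box is about 0.83, so it gets suppressed
[100, 100, 150, 150, 0.7]]
print(py_cpu_nms(dets, 0.5))  # [0, 2]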
| 22,806
|
def _romanize(word: str) -> str:
"""
:param str word: Thai word to be romanized, should have already been tokenized.
:return: Spells out how the Thai word should be pronounced.
"""
if not isinstance(word, str) or not word:
return ""
word = _replace_vowels(_normalize(word))
res = _RE_CONSONANT.findall(word)
# 2-character word, all consonants
if len(word) == 2 and len(res) == 2:
word = list(word)
word.insert(1, "o")
word = "".join(word)
word = _replace_consonants(word, res)
return word
| 22,807
|
def lookup_beatmap(beatmaps: list, **lookup):
""" Finds and returns the first beatmap with the lookup specified.
Beatmaps is a list of beatmap dicts and could be used with beatmap_lookup().
Lookup is any key stored in a beatmap from beatmap_lookup().
"""
if not beatmaps:
return None
for beatmap in beatmaps:
match = True
for key, value in lookup.items():
if key.lower() not in beatmap:
raise KeyError(f"The list of beatmaps does not have key: {key}")
if not beatmap[key].lower() == value.lower():
match = False
if match:
return beatmap
return None
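A small usage sketch of lookup_beatmap; the dicts below are hypothetical and only need the keys being looked up. Note the comparison is case-insensitive on the values but uses the keys exactly as given.
beatmaps = [{"artist": "xi", "title": "Blue Zenith"},
{"artist": "Camellia", "title": "Ghost"}]
print(lookup_beatmap(beatmaps, artist="camellia"))  # {'artist': 'Camellia', 'title': 'Ghost'}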
| 22,808
|
def scott(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Scott similarity
Scott, W. A. (1955).
Reliability of content analysis: The case of nominal scale coding.
Public opinion quarterly, 321-325.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (4 * a * d - (b + c) ** 2) / ((2 * a + b + c) * (2 * d + b + c))
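With the denominator read as (2a + b + c)(2d + b + c), as in Scott (1955), the value can be checked by hand for a small contingency table. operational_taxonomic_units is external to this snippet, so the counts a, b, c, d are plugged in directly here.
a, b, c, d = 4, 1, 1, 4
pi = (4 * a * d - (b + c) ** 2) / ((2 * a + b + c) * (2 * d + b + c))
print(pi)  # (64 - 4) / (10 * 10) = 0.6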
| 22,809
|
def write_coverage_file(coverage_file_path: Path, exit_code: int, coverage_content):
"""Write the formatted coverage to file."""
with open(coverage_file_path, "w", encoding="utf-8") as f:
if exit_code == 0:
f.write("## Coverage passed ✅\n\n")
else:
f.write("## Coverage failed ❌\n\n")
# Open collapsible section
f.write("<details><summary>Coverage details</summary>\n<p>\n\n")
f.write("```\n")
f.writelines(coverage_content)
# Close collapsible section
f.write("```\n\n")
f.write("</p>\n</details>\n\n")
| 22,810
|
def test_genesis_hash(genesis_fixture):
"""
py current: 7e2c3861f556686d7bc3ce4e93fa0011020868dc769838aca66bcc82010a2c60
fixtures 15.10: f68067286ddb7245c2203b18135456de1fc4ed6a24a2d9014195faa7900025bf
py poc6: 08436a4d33c77e6acf013e586a3333ad152f25d31df8b68749d85046810e1f4b
fixtures 19.9: 08436a4d33c77e6acf013e586a3333ad152f25d31df8b68749d85046810e1f4b
"""
genesis = blocks.genesis(new_db())
assert genesis.hex_hash() == genesis_fixture['genesis_hash']
| 22,811
|
def mainmenu_choice(message):
"""Выбор пункта главного меню"""
choice = message.text
if choice == 'Скрин':
take_screenshot(message)
else:
BOT.send_message(message.chat.id, 'Неизвестная команда')
mainmenu(message)
| 22,812
|
def get_test_runner():
"""
Returns a test runner instance for unittest.main. This object captures
the test output and saves it as an xml file.
"""
try:
import xmlrunner
path = get_test_dir()
runner = xmlrunner.XMLTestRunner(output=path)
return runner
except Exception, e:
print("get_test_runner error: %s" % e)
return None
| 22,813
|
def _construct_capsule(geom, pos, rot):
"""Converts a cylinder geometry to a collider."""
radius = float(geom.get('radius'))
length = float(geom.get('length'))
length = length + 2 * radius
return config_pb2.Collider(
capsule=config_pb2.Collider.Capsule(radius=radius, length=length),
rotation=_vec(euler.quat2euler(rot, 'rxyz'), scale=180 / np.pi),
position=_vec(pos))
| 22,814
|
def test_locked_final(tmpdir):
"""The auto-patch workflow is interrupted by a persistent lock,
auto-patch eventually gives up waiting.
"""
with tmpdir.as_cwd():
caller = AutoPatchCaller.get_caller("locked_final", config=no_wait)
caller.run()
caller.check_report()
| 22,815
|
def file_root_dir(tmpdir_factory):
"""Prepares the testing dirs for file tests"""
root_dir = tmpdir_factory.mktemp('complex_file_dir')
for file_path in ['file1.yml',
'arg/name/file2',
'defaults/arg/name/file.yml',
'defaults/arg/name/file2',
'vars/arg/name/file1.yml',
'vars/arg/name/file3.yml',
'vars/arg/name/nested/file4.yml']:
root_dir.join(file_path).ensure()
return root_dir
| 22,816
|
def path_nucleotide_length(g: BifrostDiGraph, path: Iterable[Kmer]) -> int:
"""Compute the length of a path in nucleotides."""
if not path:
return 0
node_iter = iter(path)
start = next(node_iter)
k = g.graph['k']
length = g.nodes[start]['length'] + k - 1
prev = start
for n in node_iter:
if (prev, n) not in g.edges:
raise ValueError(f"Invalid path specified, ({prev}, {n}) is not an edge.")
length += g.nodes[n]['length']
prev = n
return length
| 22,817
|
def extract_static_links(page_content):
"""Deliver the static asset links from a page source."""
soup = bs(page_content, "html.parser")
static_js = [
link.get("src")
for link in soup.findAll("script")
if link.get("src") and "static" in link.get("src")
]
static_images = [
image.get("src")
for image in soup.findAll("img")
if image.get("src") and "static" in image.get("src")
]
static_css = [
link.get("href")
for link in soup.findAll("link")
if link.get("href") and "static" in link.get("href")
]
return static_js + static_images + static_css
| 22,818
|
def zero_inflated_nb(n, p, phi=0, size=None):
"""Models a zero-inflated negative binomial
Something about the negative binomial model here...
This basically just wraps the numpy negative binomial generator,
where the probability of a zero is additionally inflated by
some probability, phi...
Parameters
----------
n : int
Parameter, > 0.
p : float
Parameter, 0 <= p <= 1.
phi : float, optional
The probability of obtaining an excess zero in the model,
where 0 <= phi <= 1. When `phi = 0`, the distribution collapses
to a negative binomial model.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
int or ndarray of ints
Drawn samples
Also See
--------
np.random.negative_binomial
References
----------
..[1] Kurtz, Z.D. et al. (2015) "Sparse and Compositionally Robust Inference
of Microbial Ecological Networks." PLoS Computational Biology. 11: e1004226
http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004226
"""
zeros = (np.random.binomial(1, phi, size) == 1)
nb_ = np.random.negative_binomial(n, p, size=size)
nb_[zeros] = 0
return nb_
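A quick way to see the zero inflation at work: with phi = 0.4, roughly 40% of draws are forced to zero on top of the negative binomial's own (small, for these parameters) zero mass.
import numpy as np
np.random.seed(0)
draws = zero_inflated_nb(5, 0.3, phi=0.4, size=100_000)
print(round((draws == 0).mean(), 3))  # about 0.40: phi plus the NB's own zero probability of 0.3**5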
| 22,819
|
def test_missing_properties():
# pylint: disable=line-too-long
"""Test that ValueError is raised if an embedded representation does not have properties attribute.
1. Create an embedded representation marshaler for an object without properties attribute.
2. Try to call marshal_properties method.
3. Check that ValueError is raised.
4. Check the error message.
"""
# pylint: enable=line-too-long
marshaler = RepresentationMarshaler(
marshaler=JSONMarshaler(),
embedded_representation=object(),
)
with pytest.raises(ValueError) as error_info:
marshaler.marshal_properties()
expected_message = "Failed to get properties of the embedded representation"
assert error_info.value.args[0] == expected_message, "Wrong error"
| 22,820
|
async def get_bot_queue(
request: Request,
state: enums.BotState = enums.BotState.pending,
verifier: int = None,
worker_session = Depends(worker_session)
):
"""Admin API to get the bot queue"""
db = worker_session.postgres
if verifier:
bots = await db.fetch("SELECT bot_id, prefix, description FROM bots WHERE state = $1 AND verifier = $2 ORDER BY created_at ASC", state, verifier)
bots = await db.fetch("SELECT bot_id, prefix, description FROM bots WHERE state = $1 ORDER BY created_at ASC", state)
return {"bots": [{"user": await get_bot(bot["bot_id"]), "prefix": bot["prefix"], "invite": await invite_bot(bot["bot_id"], api = True), "description": bot["description"]} for bot in bots]}
| 22,821
|
def ask_for_rating():
"""Ask the user for a rating"""
heading = '{} {}'.format(common.get_local_string(30019),
common.get_local_string(30022))
try:
return int(xbmcgui.Dialog().numeric(heading=heading, type=0,
defaultt=''))
except ValueError:
return None
| 22,822
|
def exec_file(filename, globals=None, locals=None):
"""Execute the specified file, optionaly setup its context by using globals and locals."""
if globals is None:
globals = {}
if locals is None:
locals = globals
locals['__file__'] = filename
from py import path
from _pytest import config
from _pytest.assertion import rewrite
f = path.local(filename)
filename2 = os.path.relpath(filename, os.getcwd())
config = config._prepareconfig([], [])
_, code = rewrite._rewrite_test(config, f)
exec(code, globals, locals)
| 22,823
|
def isthai(text,check_all=False):
"""
Check whether the given text is made of Thai characters.
isthai(text, check_all=False)
text: a string or a list of characters
check_all: if True, also return a per-character True/False check
Return value:
{'thai': percentage of Thai characters, 'check_all': tuple of (character, True/False) pairs}
"""
listext=list(text)
i=0
num_isthai=0
if check_all==True:
listthai=[]
while i<len(listext):
cVal = ord(listext[i])
if(cVal >= 3584 and cVal <= 3711):
num_isthai+=1
if check_all==True:
listthai.append(True)
else:
if check_all==True:
listthai.append(False)
i+=1
thai=(num_isthai/len(listext))*100
if check_all==True:
dictthai=tuple(zip(listext,listthai))
data= {'thai':thai,'check_all':dictthai}
else:
data= {'thai':thai}
return data
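Usage of isthai as documented above; the percentage counts characters in the Thai Unicode block (code points 3584 to 3711, i.e. U+0E00 to U+0E7F).
print(isthai("กข1a"))                # {'thai': 50.0}
print(isthai("กข", check_all=True))  # also returns a per-character (char, True/False) tuple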
| 22,824
|
def _convert_object_array(
content: List[Scalar], dtype: Optional[DtypeObj] = None
) -> List[Scalar]:
"""
Internal function to convert an object array.
Parameters
----------
content: list of processed data records
dtype: np.dtype, default is None
Returns
-------
arrays: casted content if not object dtype, otherwise return as is in list.
"""
# provide soft conversion of object dtypes
def convert(arr):
if dtype != np.dtype("O"):
arr = lib.maybe_convert_objects(arr)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays
| 22,825
|
def update_room_time(conn, room_name: str, req_time: int) -> int:
"""部屋のロックを取りタイムスタンプを更新する
トランザクション開始後この関数を呼ぶ前にクエリを投げると、
そのトランザクション中の通常のSELECTクエリが返す結果がロック取得前の
状態になることに注意 (keyword: MVCC, repeatable read).
"""
cur = conn.cursor()
# See page 13 and 17 in https://www.slideshare.net/ichirin2501/insert-51938787
cur.execute("INSERT INTO room_time(room_name, time) VALUES (%s, 0) ON DUPLICATE KEY UPDATE time = time",
(room_name,))
cur.execute("SELECT time FROM room_time WHERE room_name = %s FOR UPDATE", (room_name,))
room_time = cur.fetchone()[0]
current_time = get_current_time(conn)
if room_time > current_time:
raise RuntimeError(f"room_time is future: room_time={room_time}, req_time={req_time}")
if req_time and req_time < current_time:
raise RuntimeError(f"req_time is past: req_time={req_time}, current_time={current_time}")
cur.execute("UPDATE room_time SET time = %s WHERE room_name = %s", (current_time, room_name))
return current_time
| 22,826
|
def search_db_via_query(query):
"""Function that checks database for matching entries with user input.
The function takes the user input and adds it to the used sql command to search for matching entries in the provided database
if there are matching entries these will be printed in the python console
Args:
query (str): habitat name in sql, provided by the user
Returns:
table entries matching with user input
"""
connection = sqlite3.connect("Pflanzendaten.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM plants WHERE " + query)
content = cursor.fetchall()
print(tabulate((content), headers=['species', 'name', 'nativ', 'endangered', 'habitat', 'waterdepthmin', 'waterdepthmax', 'rootdepth', 'groundwatertablechange', 'floodheightmax', 'floodloss', 'floodduration']))
print('Status 1 equals nativ')
connection.close()
| 22,827
|
def get_layer_coverage(cat, store, store_obj):
"""Get correct layer coverage from a store."""
coverages = cat.mosaic_coverages(store_obj)
# Find the correct coverage
coverage = None
for cov in coverages["coverages"]["coverage"]:
if store == cov['name']:
coverage = cov
break
if coverage is None:
logger.warning("Layer '%s' not found", store)
return coverage
| 22,828
|
def test_bsplines(tmp_path, testnum):
"""Test idempotency of B-Splines interpolation + approximation."""
targetshape = (10, 12, 9)
# Generate an oblique affine matrix for the target - it will be a common case.
targetaff = nb.affines.from_matvec(
nb.eulerangles.euler2mat(x=0.9, y=0.001, z=0.001) @ np.diag((2, 3, 4)),
)
# Intendedly mis-centered (exercise we may not have volume-centered NIfTIs)
targetaff[:3, 3] = nb.affines.apply_affine(
targetaff, 0.5 * (np.array(targetshape) - 3)
)
# Generate some target grid
targetnii = nb.Nifti1Image(np.ones(targetshape), targetaff, None)
targetnii.to_filename(tmp_path / "target.nii.gz")
# Generate random coefficients
gridnii = bspline_grid(targetnii, control_zooms_mm=(4, 6, 8))
coeff = (np.random.random(size=gridnii.shape) - 0.5) * 500
coeffnii = nb.Nifti1Image(coeff.astype("float32"), gridnii.affine, gridnii.header)
coeffnii.to_filename(tmp_path / "coeffs.nii.gz")
os.chdir(tmp_path)
# Check that we can interpolate the coefficients on a target
test1 = Coefficients2Warp(
in_target=str(tmp_path / "target.nii.gz"),
in_coeff=str(tmp_path / "coeffs.nii.gz"),
pe_dir="j-",
ro_time=1.0,
).run()
# Approximate the interpolated target
test2 = BSplineApprox(
in_data=test1.outputs.out_field,
in_mask=str(tmp_path / "target.nii.gz"),
bs_spacing=[(4, 6, 8)],
recenter=False,
ridge_alpha=1e-4,
).run()
# Absolute error of the interpolated field is always below 5 Hz
assert np.all(np.abs(nb.load(test2.outputs.out_error).get_fdata()) < 5)
| 22,829
|
def retarget(songs, duration, music_labels=None, out_labels=None,
out_penalty=None, volume=None, volume_breakpoints=None,
springs=None, constraints=None,
min_beats=None, max_beats=None,
fade_in_len=3.0, fade_out_len=5.0,
**kwargs):
"""Retarget a song to a duration given input and output labels on
the music.
Suppose you like one section of a song, say, the guitar solo, and
you want to create a three minute long version of the solo.
Suppose the guitar solo occurs from the 150 second mark to the 200
second mark in the original song.
You can set the label the guitar solo with 'solo' and the rest of
the song with 'other' by crafting the ``music_labels`` input
function. And you can set the ``out_labels`` function to give you
nothing but solo::
def labels(t):
if 150 < t < 200:
return 'solo'
return 'other'
def target(t): return 'solo'
song = Song("sweet-rock-song.wav")
composition, info = retarget(song, 180,
music_labels=labels, out_labels=target)
composition.export(filename="super-long-solo")
You can achieve much more complicated retargetings by adjusting
the ``music_labels``, `out_labels` and ``out_penalty`` functions,
but this should give you a basic sense of how to use the
``retarget`` function.
:param song: Song to retarget
:type song: :py:class:`radiotool.composer.Song`
:param duration: Duration of retargeted song (in seconds)
:type duration: float
:param music_labels: A function that takes a time (in seconds) and
returns the label (str) of the input music at that time
:type music_labels: function
:param out_labels: A function that takes a time (in seconds) and
returns the desired label (str) of the output music at that
time
:type out_labels: function
:param out_penalty: A function that takes a time (in seconds) and
returns the penalty for not matching the correct output label
at that time (default is 1.0)
:type out_penalty: function
:returns: Composition of retargeted song, and dictionary of
information about the retargeting
:rtype: (:py:class:`radiotool.composer.Composition`, dict)
"""
# get song analysis
if isinstance(songs, Track):
songs = [songs]
multi_songs = len(songs) > 1
analyses = [s.analysis for s in songs]
# generate labels for every beat in the input and output
beat_lengths = [a[BEAT_DUR_KEY] for a in analyses]
beats = [a["beats"] for a in analyses]
beat_length = np.mean(beat_lengths)
logging.info("Beat lengths of songs: {} (mean: {})".
format(beat_lengths, beat_length))
if out_labels is not None:
target = [out_labels(i) for i in np.arange(0, duration, beat_length)]
else:
target = ["" for i in np.arange(0, duration, beat_length)]
if music_labels is not None:
if not multi_songs:
music_labels = [music_labels]
music_labels = [item for sublist in music_labels
for item in sublist]
if len(music_labels) != len(songs):
raise ArgumentException("Did not specify {} sets of music labels".
format(len(songs)))
start = [[music_labels[i](j) for j in b] for i, b in enumerate(beats)]
else:
start = [["" for i in b] for b in beats]
if out_penalty is not None:
pen = np.array([out_penalty(i) for i in np.arange(
0, duration, beat_length)])
else:
pen = np.array([1 for i in np.arange(0, duration, beat_length)])
# we're using a valence/arousal constraint, so we need these
in_vas = kwargs.pop('music_va', None)
if in_vas is not None:
if not multi_songs:
in_vas = [in_vas]
in_vas = [item for sublist in in_vas for item in sublist]
if len(in_vas) != len(songs):
raise ArgumentException("Did not specify {} sets of v/a labels".
format(len(songs)))
for i, in_va in enumerate(in_vas):
if callable(in_va):
in_va = np.array([in_va(j) for j in beats[i]])
in_vas[i] = in_va
target_va = kwargs.pop('out_va', None)
if callable(target_va):
target_va = np.array(
[target_va(i) for i in np.arange(0, duration, beat_length)])
# set constraints
if constraints is None:
min_pause_len = 20.
max_pause_len = 35.
min_pause_beats = int(np.ceil(min_pause_len / beat_length))
max_pause_beats = int(np.floor(max_pause_len / beat_length))
constraints = [(
rt_constraints.PauseConstraint(
min_pause_beats, max_pause_beats,
to_penalty=1.4, between_penalty=.05, unit="beats"),
rt_constraints.PauseEntryVAChangeConstraint(target_va, .005),
rt_constraints.PauseExitVAChangeConstraint(target_va, .005),
rt_constraints.TimbrePitchConstraint(
context=0, timbre_weight=1.5, chroma_weight=1.5),
rt_constraints.EnergyConstraint(penalty=0.5),
rt_constraints.MinimumLoopConstraint(8),
rt_constraints.ValenceArousalConstraint(
in_va, target_va, pen * .125),
rt_constraints.NoveltyVAConstraint(in_va, target_va, pen),
) for in_va in in_vas]
else:
max_pause_beats = 0
if len(constraints) > 0:
if isinstance(constraints[0], rt_constraints.Constraint):
constraints = [constraints]
pipelines = [rt_constraints.ConstraintPipeline(constraints=c_set)
for c_set in constraints]
trans_costs = []
penalties = []
all_beat_names = []
for i, song in enumerate(songs):
(trans_cost, penalty, bn) = pipelines[i].apply(song, len(target))
trans_costs.append(trans_cost)
penalties.append(penalty)
all_beat_names.append(bn)
logging.info("Combining tables")
total_music_beats = int(np.sum([len(b) for b in beats]))
total_beats = total_music_beats + max_pause_beats
# combine transition cost tables
trans_cost = np.ones((total_beats, total_beats)) * np.inf
sizes = [len(b) for b in beats]
idx = 0
for i, size in enumerate(sizes):
trans_cost[idx:idx + size, idx:idx + size] =\
trans_costs[i][:size, :size]
idx += size
trans_cost[:total_music_beats, total_music_beats:] =\
np.vstack([tc[:len(beats[i]), len(beats[i]):]
for i, tc in enumerate(trans_costs)])
trans_cost[total_music_beats:, :total_music_beats] =\
np.hstack([tc[len(beats[i]):, :len(beats[i])]
for i, tc in enumerate(trans_costs)])
trans_cost[total_music_beats:, total_music_beats:] =\
trans_costs[0][len(beats[0]):, len(beats[0]):]
# combine penalty tables
penalty = np.empty((total_beats, penalties[0].shape[1]))
penalty[:total_music_beats, :] =\
np.vstack([p[:len(beats[i]), :] for i, p in enumerate(penalties)])
penalty[total_music_beats:, :] = penalties[0][len(beats[0]):, :]
logging.info("Building cost table")
# compute the dynamic programming table (prev python method)
# cost, prev_node = _build_table(analysis, duration, start, target, pen)
# first_pause = 0
# if max_pause_beats > 0:
first_pause = total_music_beats
if min_beats is None:
min_beats = 0
elif min_beats == 'default':
min_beats = int(20. / beat_length)
if max_beats is None:
max_beats = -1
elif max_beats == 'default':
max_beats = int(90. / beat_length)
max_beats = min(max_beats, penalty.shape[1])
tc2 = np.nan_to_num(trans_cost)
pen2 = np.nan_to_num(penalty)
beat_names = []
for i, bn in enumerate(all_beat_names):
for b in bn:
if not str(b).startswith('p'):
beat_names.append((i, float(b)))
beat_names.extend([('p', i) for i in xrange(max_pause_beats)])
result_labels = []
logging.info("Running optimization (full backtrace, memory efficient)")
logging.info("\twith min_beats(%d) and max_beats(%d) and first_pause(%d)" %
(min_beats, max_beats, first_pause))
song_starts = [0]
for song in songs:
song_starts.append(song_starts[-1] + len(song.analysis["beats"]))
song_ends = np.array(song_starts[1:], dtype=np.int32)
song_starts = np.array(song_starts[:-1], dtype=np.int32)
t1 = time.clock()
path_i, path_cost = build_table_full_backtrace(
tc2, pen2, song_starts, song_ends,
first_pause=first_pause, max_beats=max_beats, min_beats=min_beats)
t2 = time.clock()
logging.info("Built table (full backtrace) in {} seconds"
.format(t2 - t1))
path = []
if max_beats == -1:
max_beats = min_beats + 1
first_pause_full = max_beats * first_pause
n_beats = first_pause
for i in path_i:
if i >= first_pause_full:
path.append(('p', i - first_pause_full))
result_labels.append(None)
# path.append('p' + str(i - first_pause_full))
else:
path.append(beat_names[i % n_beats])
song_i = path[-1][0]
beat_name = path[-1][1]
result_labels.append(
start[song_i][np.where(np.array(beats[song_i]) ==
beat_name)[0][0]])
# path.append(float(beat_names[i % n_beats]))
# else:
# print("Running optimization (fast, full table)")
# # this won't work right now- needs to be updated
# # with the multi-song approach
# # fortran method
# t1 = time.clock()
# cost, prev_node = build_table(tc2, pen2)
# t2 = time.clock()
# print("Built table (fortran) in {} seconds".format(t2 - t1))
# res = cost[:, -1]
# best_idx = N.argmin(res)
# if N.isfinite(res[best_idx]):
# path, path_cost, path_i = _reconstruct_path(
# prev_node, cost, beat_names, best_idx, N.shape(cost)[1] - 1)
# # path_i = [beat_names.index(x) for x in path]
# else:
# # throw an exception here?
# return None
# path = []
# result_labels = []
# if max_pause_beats == 0:
# n_beats = total_music_beats
# first_pause = n_beats
# else:
# n_beats = first_pause
# for i in path_i:
# if i >= first_pause:
# path.append(('p', i - first_pause))
# result_labels.append(None)
# else:
# path.append(beat_names[i % n_beats])
# song_i = path[-1][0]
# beat_name = path[-1][1]
# result_labels.append(
# start[song_i][N.where(N.array(beats[song_i]) ==
# beat_name)[0][0]])
# return a radiotool Composition
logging.info("Generating audio")
(comp, cf_locations, result_full_labels,
cost_labels, contracted, result_volume) =\
_generate_audio(
songs, beats, path, path_cost, start,
volume=volume,
volume_breakpoints=volume_breakpoints,
springs=springs,
fade_in_len=fade_in_len, fade_out_len=fade_out_len)
info = {
"beat_length": beat_length,
"contracted": contracted,
"cost": np.sum(path_cost) / len(path),
"path": path,
"path_i": path_i,
"target_labels": target,
"result_labels": result_labels,
"result_full_labels": result_full_labels,
"result_volume": result_volume,
"transitions": [Label("crossfade", loc) for loc in cf_locations],
"path_cost": cost_labels
}
return comp, info
| 22,830
|
def st_get_ipfs_cache_path(user_did):
"""
Get the root dir of the IPFS cache files.
:param user_did: The user DID
:return: Path: the path of the cache root.
"""
return _st_get_vault_path(user_did) / 'ipfs_cache'
| 22,831
|
def create_img_caption_int_data(filepath):
""" function to load captions from text file and convert them to integer
format
:return: dictionary with image ids and associated captions in int format
"""
print("\nLoading caption data : started")
# load caption data
img_caption_dict = load_img_caption_data(filepath)
# merge caption text data
text_data = " ".join([" ".join(txt) for txt in img_caption_dict.values()])
# create word to int mappings
(word_to_int_map, int_to_word_map) = create_word_mappings(text_data)
# convert caption data to int
img_caption_int_dict = {}
for key, value in img_caption_dict.items():
img_caption_int_dict[key] = [convert_text_to_int(txt, word_to_int_map)
for txt in value]
print("\nLoading caption data : completed")
return img_caption_int_dict
| 22,832
|
def process_submission(submission_id, chain=True):
"""
Handles the uploading of a submission.
"""
assert Submission.objects.filter(id=submission_id).count() > 0, "Submission {} does not exist!".format(submission_id)
assert SubmissionState.objects.filter(submission_id=submission_id).count() > 0, "SubmissionState {} does not exist!".format(submission_id)
submission = Submission.objects.get(id=submission_id)
state = SubmissionState.objects.get(submission_id=submission_id)
logger.info("Processing submission %s", submission_id)
if state.status != 'pending-upload':
logger.warning("Trying to process submission %s, but state is %s", submission.id, state.status)
return
try:
reader = MFileReader()
doc_ids = set(r.doc_id for r in db.select("SELECT doc_id FROM document_tag WHERE tag = %(tag)s", tag=submission.corpus_tag))
with gzip.open(submission.uploaded_filename, 'rt', encoding="utf-8") as f:
mfile = reader.parse(f, doc_ids=doc_ids, logger=logger)
api.upload_submission(submission_id, mfile)
# Update state of submission.
state.status = 'pending-sampling'
state.save()
if chain:
sample_submission.delay(submission_id, n_samples=500)
except Exception as e:
logger.exception(e)
state.status = 'error'
state.message = traceback.format_exc()
state.save()
| 22,833
|
def eigenvector_2d_symmetric(a, b, d, eig, eps=1e-8):
"""Returns normalized eigenvector corresponding to the provided eigenvalue.
Note that this is a special case of a 2x2 symmetric matrix where every element of the matrix is passed as an image.
This allows the evaluation of eigenvalues to be vectorized over the entire image. This is much more efficient
than calling the numpy function for computing the eigenvectors for each pixel of the image.
This function solves:
| a-lambda b |
| b d-lambda | [x, y] = 0
Which means that:
bx = (lambda - d) y
or
y = (lambda - a)/b x
This solution is invalid for b == 0. Here we expect orthogonal vectors [1 0] and [0 1].
ax + by = l x
bx + dy = l y
so x = 1 iff b = 0 and l = a
and y = 1 iff b = 0 and l = d
"""
ex = np.zeros(a.shape)
ey = np.zeros(a.shape)
ex[np.abs(a - eig) < eps] = 1
ey[np.abs(d - eig) < eps] = 1
mask = np.abs(b) > eps
tx = b[mask]
ty = eig[mask] - a[mask]
length = np.sqrt(tx * tx + ty * ty)
tx = tx / length
ty = ty / length
ex[mask] = tx
ey[mask] = ty
return ex, ey
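A minimal check of the vectorized eigenvector routine above. The eigenvalue image is computed here with the closed-form 2x2 expression, which the snippet assumes the caller provides.
import numpy as np
a = np.array([[2.0]]); b = np.array([[1.0]]); d = np.array([[2.0]])
eig = (a + d) / 2 + np.sqrt(((a - d) / 2) ** 2 + b ** 2)  # larger eigenvalue per pixel
ex, ey = eigenvector_2d_symmetric(a, b, d, eig)
print(ex, ey)  # both ~0.7071, i.e. the normalized [1, 1] direction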
| 22,834
|
def write_func_tsvs(file_dataframe):
"""
Given a complete dataframe of files, writes appropriate tsvs for func files.
Parameters
----------
file_dataframe : DataFrame
DataFrame created by organize_files() containing metadata about each file
"""
for subject in file_dataframe:
onsets = file_dataframe[subject]["vmrk"].timings()
duration = file_dataframe[subject]["dat"].average_duration()
# Prep a dataframe to write to .tsv.
tsv_tuples = [ ("onset", "duration", "trial_type") ]
for onset in onsets:
tsv_tuples.append( (onset, duration, "gabor") )
tsv_dataframe = pandas.DataFrame(tsv_tuples)
# Get .tsv path.
func_file = file_dataframe[subject]["func"]
tsv_path = Path(str(func_file.path).replace("_bold.nii", "_events.tsv"))
# Write the .tsv
tsv_dataframe.to_csv(tsv_path, sep="\t", header=False, index=False)
| 22,835
|
def update_atoms_from_calc(atoms, calc=None, calc_prefix=''):
"""Update information in atoms from results in a calculator
Args:
atoms (ase.atoms.Atoms): Atoms object, modified in place
calc (ase.calculators.Calculator, optional): calculator to take results from.
Defaults to :attr:`atoms.calc`
calc_prefix (str, optional): String to prefix to results names
in `atoms.arrays` and `atoms.info.`
"""
if calc is None:
calc = atoms.calc
for prop, value in calc.results.items():
if prop in per_config_properties:
atoms.info[calc_prefix + prop] = value
elif prop in per_atom_properties:
atoms.arrays[calc_prefix + prop] = value
else:
raise KeyError(f'unknown property {prop}')
| 22,836
|
def get_protection_path_name(protection: Optional[RouteProtection]) -> str:
"""Get the protection's path name."""
if protection is None:
return DEFAULT_PROTECTION_NAME
return protection
| 22,837
|
def _get_object_description(target):
"""Return a string describing the *target*"""
if isinstance(target, list):
data = "<list, length {}>".format(len(target))
elif isinstance(target, dict):
data = "<dict, length {}>".format(len(target))
else:
data = target
return data
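For reference, the helper above yields:
print(_get_object_description([1, 2, 3]))   # <list, length 3>
print(_get_object_description({"a": 1}))    # <dict, length 1>
print(_get_object_description("plain"))     # plain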
| 22,838
|
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
""" A map of modules from TF to PyTorch.
I use a map to keep the PyTorch model as
identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
if hasattr(model, "lm_loss"):
# We will load also the output bias
tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
# We will load also the sequence summary
tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
if (
hasattr(model, "logits_proj")
and config.finetuning_task is not None
and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
):
tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias
# Now load the rest of the transformer
model = model.transformer
# Embeddings and output
tf_to_pt_map.update(
{
"model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
"model/transformer/mask_emb/mask_emb": model.mask_emb,
}
)
# Transformer blocks
for i, b in enumerate(model.layer):
layer_str = "model/transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.rel_attn.o,
layer_str + "rel_attn/q/kernel": b.rel_attn.q,
layer_str + "rel_attn/k/kernel": b.rel_attn.k,
layer_str + "rel_attn/r/kernel": b.rel_attn.r,
layer_str + "rel_attn/v/kernel": b.rel_attn.v,
layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
r_s_list = []
seg_embed_list = []
for b in model.layer:
r_r_list.append(b.rel_attn.r_r_bias)
r_w_list.append(b.rel_attn.r_w_bias)
r_s_list.append(b.rel_attn.r_s_bias)
seg_embed_list.append(b.rel_attn.seg_embed)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
r_s_list = [model.r_s_bias]
seg_embed_list = [model.seg_embed]
tf_to_pt_map.update(
{
"model/transformer/r_r_bias": r_r_list,
"model/transformer/r_w_bias": r_w_list,
"model/transformer/r_s_bias": r_s_list,
"model/transformer/seg_embed": seg_embed_list,
}
)
return tf_to_pt_map
| 22,839
|
def dynamicviewset(viewset):
"""
The activate route only makes sense if
user activation is required, remove the
route if activation is turned off
"""
if not settings['REQUIRE_ACTIVATION'] and hasattr(viewset, 'activate'):
delattr(viewset, 'activate')
return viewset
| 22,840
|
def generate_arabic_place_name(min_length=0):
"""Return a randomly generated, potentially multi-word fake Arabic place name"""
make_name = lambda n_words: ' '.join(random.sample(place_names, n_words))
n_words = 3
name = make_name(n_words)
while len(name) < min_length:
n_words += 1
name = make_name(n_words)
return name
| 22,841
|
def main():
"""
Run each of the functions and store the results to be reported on in a
results file
"""
system_info = {}
args = handle_arguments()
system_info['profile'] = get_os_info(args.verbose)
system_info['compatability'] = check_system_type(
system_info.get('profile').get('based_on'),
system_info.get('profile').get('version'),
args.verbose
)
system_info['resources'] = system_requirements(args.verbose)
system_info['mounts'] = mounts_check(args.verbose)
system_info['resolv'] = inspect_resolv_conf(
'/etc/resolv.conf',
args.verbose
)
system_info['ports'] = check_open_ports(args.interface, args.verbose)
system_info['agents'] = check_for_agents(args.verbose)
system_info['modules'] = check_modules(
system_info.get('profile').get('distribution'),
system_info.get('profile').get('version'),
args.verbose
)
if system_info.get('profile').get('based_on').lower() == 'rhel':
system_info['selinux'] = selinux('/etc/selinux/config', args.verbose)
system_info['infinity_set'] = None
if system_info.get('profile').get('distribution').lower() == 'sles':
system_info['infinity_set'] = suse_infinity_check(
'/etc/systemd/system.conf',
args.verbose
)
system_info['sysctl'] = check_sysctl(args.verbose)
overall_result = process_results(system_info)
print('\nOverall Result: {0}'.format(overall_result))
print(
'To view details about the results a results.txt file has been '
'generated in the current directory\n'
)
| 22,842
|
def find_cutoffs(x,y,crdist,deltas):
"""function for identifying locations of cutoffs along a centerline
and the indices of the segments that will become part of the oxbows
from MeanderPy
x,y - coordinates of centerline
crdist - critical cutoff distance
deltas - distance between neighboring points along the centerline"""
diag_blank_width = int((crdist+20*deltas)/deltas)
# distance matrix for centerline points:
dist = distance.cdist(np.array([x,y]).T,np.array([x,y]).T)
dist[dist>crdist] = np.NaN # set all values that are larger than the cutoff threshold to NaN
# set matrix to NaN along the diagonal zone:
for k in range(-diag_blank_width,diag_blank_width+1):
rows, cols = kth_diag_indices(dist,k)
dist[rows,cols] = np.NaN
i1, i2 = np.where(~np.isnan(dist))
ind1 = i1[np.where(i1<i2)[0]] # get rid of unnecessary indices
ind2 = i2[np.where(i1<i2)[0]] # get rid of unnecessary indices
return ind1, ind2 # return indices of cutoff points and cutoff coordinates
| 22,843
|
def task_gist_submodule():
"""load gist as submodules from api data"""
import sqlite_utils, sqlite3, json
db = sqlite_utils.Database(sqlite3.connect(SETTINGS.db))
table = db["gist"]
for row in table.rows:
if not row["public"]:
continue
if not row["description"]:
continue
dir = Path(SETTINGS.name)
folder = dir / row["id"]
yield dict(
name=f"""submodule-add-{row["owner"]}-{row["id"]}""",
file_dep=[".git/config"],
actions=[f"""git submodule add --force {row["html_url"]} {folder}"""],
targets=[folder / x for x in json.loads(row["files"])],
)
| 22,844
|
def test_show_navbar_depth(sphinx_build_factory):
"""Test with different levels of show_navbar_depth."""
sphinx_build = sphinx_build_factory(
"base",
confoverrides={"html_theme_options.show_navbar_depth": 2},
).build(
assert_pass=True
) # type: SphinxBuild
sidebar = sphinx_build.html_tree("section1", "ntbk.html").find_all(
attrs={"class": "bd-sidebar"}
)[0]
for checkbox in sidebar.select("li.toctree-l1 > input"):
assert "checked" in checkbox.attrs
for checkbox in sidebar.select("li.toctree-l2 > input"):
assert "checked" not in checkbox.attrs
| 22,845
|
def log_stdout() -> logging.Logger:
"""
Returns stdout logging object
"""
log_level = logging.INFO
log = logging.getLogger("stdout_logger")
if not log.handlers:
log.setLevel(log_level)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
log.addHandler(sh)
log.handler_set = True
log.propagate = False
return log
| 22,846
|
def training_set_multiplication(training_set, mult_queue):
"""
Multiply the training set by all methods listed in mult_queue.
Parameters
----------
training_set :
set of all recordings that will be used for training
mult_queue :
list of all algorithms that will take one recording and generate more
than one.
Returns
-------
multiple recordings
"""
logger.info("Multiply data...")
for algorithm in mult_queue:
new_training_set = []
for recording in training_set:
samples = algorithm(recording["handwriting"])
for sample in samples:
new_training_set.append(
{
"id": recording["id"],
"is_in_testset": 0,
"formula_id": recording["formula_id"],
"handwriting": sample,
"formula_in_latex": recording["formula_in_latex"],
}
)
training_set = new_training_set
return new_training_set
| 22,847
|
def addDictionaryFromWeb(url, params=None, **kwargs):
"""
Fetch the dictionary metadata (JSON-LD) contained in the page at the given URL,
download the toponym-analysis dictionary (CSV file) from the URL listed in that
metadata, and register it in the database.
If dictionary data with the same identifier is already registered in the database,
it is deleted before the new dictionary data is registered.
To make the registered dictionary usable, it must be activated with
``setActivateDictionaries()`` or ``activateDictionaries()``.
Parameters
----------
url : str
URL of the web page containing the dictionary metadata.
params : dict, optional
params parameter passed to requests.get.
**kwargs : dict, optional
kwargs parameters passed to requests.get.
Returns
-------
bool
Always True. An exception is raised if registration fails.
Examples
--------
>>> import pygeonlp.api as api
>>> api.init()
>>> api.addDictionaryFromWeb('https://geonlp.ex.nii.ac.jp/dictionary/geoshape-city/')
True
>>> api.updateIndex()
>>> api.activateDictionaries(pattern=r'geoshape-city')
['geonlp:geoshape-city']
>>> geowords = api.searchWord('千代田区')
>>> len(geowords)
1
>>> next(iter(geowords.values()))['dictionary_identifier']
'geonlp:geoshape-city'
"""
_check_initialized()
return _default_service.addDictionaryFromWeb(url, params, **kwargs)
| 22,848
|
def test_nested_blocks(pprint):
"""
Expected result:
procedure test(x, y: Integer);
begin
x:=1;
y:=200;
for z:= 1 to 100 do
begin
x := x + z;
end;
y:=x;
end;
"""
def brk(offset=0):
"force a new line and indent by given offset"
return T.BREAK(blankSpace=9999, offset=offset)
text = [
T.BEGIN(breakType=BreakType.consistent, offset=0),
T.STRING('procedure test(x, y: Integer);'), brk(),
T.STRING("begin"),
brk(2), T.STRING("x:=1;"),
brk(2), T.STRING("y:=200;"),
# indented for loop
brk(2), T.BEGIN(breakType=BreakType.consistent, offset=0),
T.STRING("for z:= 1 to 100 do"), brk(),
T.STRING("begin"),
brk(2), T.STRING("x := x + z;"), brk(),
T.STRING("end;"),
T.END(),
brk(2), T.STRING("y:=x;"), brk(),
T.STRING("end;"),
T.END(),
T.EOF()]
result = pprint(text)
assert result == (
'procedure test(x, y: Integer);\n'
'begin\n'
' x:=1;\n'
' y:=200;\n'
' for z:= 1 to 100 do\n'
' begin\n'
' x := x + z;\n'
' end;\n'
' y:=x;\n'
'end;'
)
| 22,849
|
async def read(
sensors: Sequence[Sensor], msg: str = "", retry_single: bool = False
) -> bool:
"""Read from the Modbus interface."""
global READ_ERRORS # pylint:disable=global-statement
try:
try:
await SUNSYNK.read(sensors)
READ_ERRORS = 0
return True
except asyncio.TimeoutError:
_LOGGER.error("Read error%s: Timeout", msg)
except ModbusIOException:
# TCP: try to reconnect since it got a fairly serious error
await asyncio.sleep(1)
await SUNSYNK.connect()
except Exception as err: # pylint:disable=broad-except
_LOGGER.error("Read Error%s: %s", msg, err)
READ_ERRORS += 1
if READ_ERRORS > 3:
raise Exception(f"Multiple Modbus read errors: {err}") from err
if retry_single:
_LOGGER.info("Retrying individual sensors: %s", [s.name for s in SENSORS])
for sen in sensors:
await asyncio.sleep(0.02)
await read([sen], msg=sen.name, retry_single=False)
return False
| 22,850
|
def initindex():
"""Delete all information from the elastic search Index."""
click.echo('Loading data into Elastic Search')
from app import data_loader
data_loader = data_loader.DataLoader()
data_loader.build_index()
| 22,851
|
def boxlist_iou_guide_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores"):
"""
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.
Arguments:
boxlist(BoxList)
nms_thresh (float)
max_proposals (int): if > 0, then only the top max_proposals are kept
after non-maximum suppression
score_field (str)
"""
if nms_thresh <= 0:
return boxlist
mode = boxlist.mode
boxlist = boxlist.convert("xyxy")
boxes = boxlist.bbox
scores = boxlist.get_field(score_field)
ious = boxlist.get_field('ious')
keep, scores_new = iou_guide_nms(boxes, scores, ious, nms_thresh)
if max_proposals > 0:
keep = keep[: max_proposals]
scores_new = scores_new[:max_proposals]
boxlist = boxlist[keep]
boxlist.add_field("scores", scores_new)
return boxlist.convert(mode)
| 22,852
|
def parse(tokens:list):
"""Transforme la liste des tokens en un arbre d'instructions ou de valeurs"""
ouverts=Pile(newnode(tokens[0]))
for token in tokens:
if token[0]=="balise":
if token[1][0]=="/":
if ouverts.top.REPR.lower()[:len(token[1])-1]!=token[1][1:]:
print(f"A tag has been opened({ouverts.top.REPR}) but not well closed(found {token[1][1:]})")
sys.exit()
else:
ouverts.pop()
else:
if token[1][-1]=="/": # Balise autofermante
token=token[0],token[1][:-1]
new=newnode(token)
ouverts.top.childs.append(new)
else:
new=newnode(token)
if new.REPR=="Indicium":
new.childs.append(ouverts.top.childs.pop())
ouverts.top.childs.append(new)
ouverts.add(new)
else:
ouverts.top.childs.append(newnode(token))
return ouverts.top
| 22,853
|
def get_payload_bin(payload, seconds):
"""
Since we can't run the ysoserial.exe file in ubuntu (at least not
easily with mono) we build the different payloads in windows and
save them to the PAYLOADS map above.
:param payload: The payload name
:param seconds: The seconds to wait
:return: The payload
"""
return SAVED_PAYLOADS[payload][seconds]
| 22,854
|
def srange(start, step, length, dtype=None):
"""
Like np.arange() but you give the start, the step, and the number
of steps. Saves having to compute the end point yourself.
"""
stop = start + (step * length)
return np.arange(start, stop, step, dtype)
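Example of srange versus plain np.arange: the stop value is derived from the step count, so the caller never computes it.
import numpy as np
print(srange(0.0, 0.5, 5))        # [0.  0.5 1.  1.5 2. ]
print(np.arange(0.0, 2.5, 0.5))   # the same array, but the caller had to work out the stop value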
| 22,855
|
def get_tool_by_id(tool_id):
"""
returns the tool given the id
"""
tool = ToolType.objects.get(pk=tool_id)
return tool
| 22,856
|
def domainr(text):
"""<domain> - uses domain.nr's API to search for a domain, and similar domains
:type text: str
"""
try:
data = http.get_json('http://domai.nr/api/json/search?q=' + text)
except (http.URLError, http.HTTPError):
return "Unable to get data for some reason. Try again later."
if data['query'] == "":
return "An error occurred: {status} - {message}".format(**data['error'])
domains = [format_domain(domain) for domain in data["results"]]
return "Domains: {}".format(", ".join(domains))
| 22,857
|
def manage_blog():
""" 博文管理页面路由 """
if 'adminname' in session:
if request.method == 'POST':
del_result = manage_del_blog(db, Post, Comment, request.form.get('edit_id'))
return del_result
else:
blog_list = Post.query.order_by(Post.post_time.desc()).all()
return render_template('admin_blog.html',
page_in='blog',
blog_list=blog_list)
else:
return redirect(url_for('login'))
| 22,858
|
def split(nodes, index, axis=0):
"""
Split an array of nodes into two separate, non-overlapping arrays.
Parameters
----------
nodes : numpy.ndarray
An N x M array of individual node coordinates (i.e., the
x-coords or the y-coords only)
index : int
The leading edge of where the split should occur.
axis : int, optional
The axis along which ``nodes`` will be split. Use `axis = 0`
to split along rows and `axis = 1` for columns.
Raises
------
ValueError
Trying to split ``nodes`` at the edge (i.e., resulting in the
original array and an empty array) will raise an error.
Returns
-------
n1, n2 : numpy.ndarrays
The two non-overlapping sides of the original array.
"""
if index + 1 >= nodes.shape[axis] or index == 0:
raise ValueError("cannot split grid at or beyond its edges")
if axis == 0:
n1, n2 = nodes[:index, :], nodes[index:, :]
elif axis == 1:
n1, n2 = nodes[:, :index], nodes[:, index:]
return n1, n2
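A short usage sketch of split on a 4 x 3 node array, along each axis:
import numpy as np
nodes = np.arange(12).reshape(4, 3)
top, bottom = split(nodes, 2, axis=0)
print(top.shape, bottom.shape)   # (2, 3) (2, 3)
left, right = split(nodes, 1, axis=1)
print(left.shape, right.shape)   # (4, 1) (4, 2)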
| 22,859
|
def smart_oracle(oracle, text, code, block_len, max_rand):
"""Call oracle normally, or repeatedly call oracle in case of random prefix.
Returns "clean" oracle ouptut regardless of whether the oracle adds a
random prefix.
"""
if not max_rand:
return oracle(text, code) if code else oracle(text)
# append arbitrary bytes unlikely to occur in attacker-controlled plaintext
text_mod = bytearray([7] * block_len * 2) + text
success = False
while not success:
encrypted = oracle(text_mod, code) if code else oracle(text_mod)
text_start = blocks_aligned(encrypted, block_len, max_rand)
if text_start is not None:
success = True
return encrypted[text_start:]
| 22,860
|
def initializeMotorController(mc: BaseMotorController):
"""
Initializes a motor controller to an "initial state" (applies common settings).
:param mc: A VictorSPX or TalonSRX to initialize.
"""
if not wpilib.RobotBase.isSimulation():
mc.configFactoryDefault()
mc.configFactoryDefault(timeout)
mc.clearStickyFaults(timeout)
mc.setSafetyEnabled(False)
mc.setNeutralMode(ctre.NeutralMode.Brake)
| 22,861
|
def start_active_span_from_edu(
edu_content,
operation_name,
references=[],
tags=None,
start_time=None,
ignore_active_span=False,
finish_on_close=True,
):
"""
Extracts a span context from an edu and uses it to start a new active span
Args:
edu_content (dict): an edu_content with a `context` field whose value is
canonical json for a dict which contains opentracing information.
For the other args see opentracing.tracer
"""
if opentracing is None:
return _noop_context_manager()
carrier = json.loads(edu_content.get("context", "{}")).get("opentracing", {})
context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
_references = [
opentracing.child_of(span_context_from_string(x))
for x in carrier.get("references", [])
]
# For some reason jaeger decided not to support the visualization of multiple parent
# spans or explicitly show references. I include the span context as a tag here as
# an aid to people debugging but it's really not an ideal solution.
references += _references
scope = opentracing.tracer.start_active_span(
operation_name,
child_of=context,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span,
finish_on_close=finish_on_close,
)
scope.span.set_tag("references", carrier.get("references", []))
return scope
| 22,862
|
async def setup():
"""
Create a clean test database every time the tests are run.
"""
async with db.with_bind(DB_URL):
alembic_config = Config('./alembic.ini')
command.upgrade(alembic_config, 'head')
yield
| 22,863
|
def inference_video_feed(request, project_id):
"""inference_video_feed
"""
return Response({
"status": "ok",
"url": "http://" + inference_module_url() + "/video_feed?inference=1",
})
| 22,864
|
def extract_start_timestamp() -> datetime:
"""Define extraction start timestamp.
Returns:
Extraction start timestamp used for testing.
"""
timestamp = datetime(2019, 8, 6, tzinfo=timezone.utc)
return timestamp
| 22,865
|
def phi_pdf(X, corr=None):
"""
Standard normal PDF/Multivariate pdf.
**Input:**
* **X** (`float`)
Argument.
* **corr** (`ndarray`)
Correlation matrix.
**Output**
Standard normal PDF of X.
"""
norm_pdf = None
if isinstance(X, int) or isinstance(X, float):
norm_pdf = norm.pdf(X, loc=0, scale=1)
else:
if np.trace(corr) != len(X):
shape_error(' X or corr ')
else:
norm_pdf = multivariate_normal.pdf(X, cov=corr)
return norm_pdf
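Usage of phi_pdf for both the scalar and the correlated multivariate case; the imports shown are assumed to match what the snippet's module already pulls in (scipy.stats norm and multivariate_normal).
import numpy as np
from scipy.stats import norm, multivariate_normal
print(round(phi_pdf(0.0), 4))                     # 0.3989, the standard normal density at 0
corr = np.array([[1.0, 0.5], [0.5, 1.0]])
print(phi_pdf(np.array([0.0, 0.0]), corr=corr))   # correlated bivariate normal density at the origin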
| 22,866
|
def computeStatistic( benchmarks, field, func ):
"""
Return the result of func applied to the values of field in benchmarks.
Arguments:
benchmarks: The list of benchmarks to gather data from.
field: The field to gather from the benchmarks.
func: The function to apply to the data, must accept a list and return a single value.
"""
results = []
for benchmark in benchmarks:
results.append( benchmark[ field ] )
return func( results )
| 22,867
|
def bus_update_request(payload):
"""Parser for `bus_update_request` tracepoint"""
try:
match = re.match(bus_update_request_pattern, payload)
if match:
match_group_dict = match.groupdict()
return BusUpdateRequest(**match_group_dict)
except Exception as e:
raise ParserError(e.message)
| 22,868
|
def update_employee(request,id):
"""
Updating the employee profile.
"""
try:
obj = User.objects.get(id=id)
total_cl = obj.no_of_cl
total_sl = obj.no_of_sl
total_wh = obj.no_of_wh
attendance_cl = Attendance.objects.filter(id=id,leave_type='cl',approved_or_not=True).count()
attendance_sl = Attendance.objects.filter(id=id,leave_type='sl',approved_or_not=True).count()
attendance_wh = Attendance.objects.filter(id=id,leave_type='wl',approved_or_not=True).count()
taken_cl = (total_cl-attendance_cl)
taken_sl = (total_sl-attendance_sl)
taken_wh = (total_wh-attendance_wh)
if request.method == "GET":
form = EmployeeCreationForm(instance=obj,initial={'email':obj.email})
context = {
'form':form,
'obj':obj,
'attendance_cl':attendance_cl,
'attendance_sl':attendance_sl,
'attendance_wh':attendance_wh,
'taken_cl':taken_cl,
'taken_sl':taken_sl,
'taken_wh':taken_wh
}
return render (request,'Employees/edit_employee.html', context)
elif request.method == "POST":
form = EmployeeCreationForm(request.POST,request.FILES,instance=obj)
if form.is_valid():
form_save = form.save(commit=False)
form_save.email = form.cleaned_data['email']
form_save.img = form.cleaned_data['img']
form_save.save()
return render(request,'Employees/edit_employee.html',{'form':form})
else:
return render(request,'Employees/edit_employee.html',{'form':form})
else:
return HttpResponseRedirect('/forbidden/')
    except Exception:
return HttpResponseRedirect('/error/')
| 22,869
|
def func(TI, S0, alpha, T1):
""" exponential function for T1-fitting.
Args
----
x (numpy.ndarray): Inversion times (TI) in the T1-mapping sequence as input for the signal model fit.
Returns
-------
a, b, T1 (numpy.ndarray): signal model fitted parameters.
"""
mz = 1 - alpha * np.exp(-TI*(alpha-1)/T1)
return np.abs(S0 * mz)
| 22,870
|
def validate(spec):
"""Decorator to validate a REST endpoint input.
Uses the schema defined in the openapi.yml file
to validate.
"""
def validate_decorator(func):
@functools.wraps(func)
def wrapper_validate(*args, **kwargs):
try:
data = request.get_json()
            except BadRequest as err:
                result = "The request body is not a well-formed JSON."
                log.debug("create_circuit result %s %s", result, 400)
                raise BadRequest(result) from err
if data is None:
result = "The request body mimetype is not application/json."
log.debug("update result %s %s", result, 415)
raise UnsupportedMediaType(result)
validator = RequestValidator(spec)
openapi_request = FlaskOpenAPIRequest(request)
result = validator.validate(openapi_request)
if result.errors:
errors = result.errors[0]
if hasattr(errors, "schema_errors"):
schema_errors = errors.schema_errors[0]
error_log = {
"error_message": schema_errors.message,
"error_validator": schema_errors.validator,
"error_validator_value": schema_errors.validator_value,
"error_path": list(schema_errors.path),
"error_schema": schema_errors.schema,
"error_schema_path": list(schema_errors.schema_path),
}
log.debug("error response: %s", error_log)
error_response = f"{schema_errors.message} for field"
error_response += f" {'/'.join(schema_errors.path)}."
else:
error_response = (
"The request body mimetype is not application/json."
)
raise BadRequest(error_response) from BadRequest
return func(*args, data=data, **kwargs)
return wrapper_validate
return validate_decorator
| 22,871
|
def make_random_coordinate():
""" Make a random coordinate dictionary"""
return make_coordinate(randint(0, 100), randint(0, 100))
| 22,872
|
def smiles_to_antechamber(
smiles_string,
gaff_mol2_filename,
frcmod_filename,
residue_name="MOL",
strictStereo=False,
):
"""Build a molecule from a smiles string and run antechamber,
generating GAFF mol2 and frcmod files from a smiles string. Charges
will be generated using the OpenEye QuacPac AM1-BCC implementation.
Parameters
----------
smiles_string : str
Smiles string of molecule to construct and charge
gaff_mol2_filename : str
Filename of mol2 file output of antechamber, with charges
created from openeye
frcmod_filename : str
Filename of frcmod file output of antechamber. Most likely
this file will be almost empty, at least for typical molecules.
residue_name : str, optional, default="MOL"
OpenEye writes mol2 files with <0> as the residue / ligand name.
This chokes many mol2 parsers, so we replace it with a string of
your choosing. This might be useful for downstream applications
if the residue names are required to be unique.
strictStereo : bool, optional, default=False
If False, permits smiles strings with unspecified stereochemistry.
See https://docs.eyesopen.com/omega/usage.html
"""
oechem = import_("openeye.oechem")
if not oechem.OEChemIsLicensed():
raise (ImportError("Need License for oechem!"))
# Get the absolute path so we can find these filenames from inside a temporary directory.
gaff_mol2_filename = os.path.abspath(gaff_mol2_filename)
frcmod_filename = os.path.abspath(frcmod_filename)
m = smiles_to_oemol(smiles_string)
m = get_charges(m, strictStereo=strictStereo, keep_confs=1)
with enter_temp_directory(): # Avoid dumping 50 antechamber files in local directory.
_unused = molecule_to_mol2(m, "./tmp.mol2", residue_name=residue_name)
net_charge = oechem.OENetCharge(m)
tmp_gaff_mol2_filename, tmp_frcmod_filename = run_antechamber(
"tmp", "./tmp.mol2", charge_method=None, net_charge=net_charge
) # USE OE AM1BCC charges!
shutil.copy(tmp_gaff_mol2_filename, gaff_mol2_filename)
shutil.copy(tmp_frcmod_filename, frcmod_filename)
| 22,873
|
def analyze():
""" kicks bottle to separate from other things """
output = bottle_finder.detect_bottle()
if output:
print("Bottle found")
belt_controller.write(b'b')
print('kicked')
return
print("Bottle not detected")
belt_controller.write(b'n')
| 22,874
|
def inv_qft_core(qubits):
"""
    Generates a Quil program that performs the
    inverse quantum Fourier transform on the given qubits
    without swapping qubits at the end.
    :param qubits: A list of qubit indexes.
    :return: A Quil program to compute the inverse QFT of the given qubits without swapping.
"""
qft_quil = Program.inst(qft_core(qubits, coef=-1))
inv_qft_quil = Program()
while(len(qft_quil) > 0):
inst = qft_quil.pop()
inv_qft_quil.inst(inst)
return inv_qft_quil
| 22,875
|
def analysis_precheck(_id, feature_table, rep_seqs, taxonomy, metadata):
"""
Do prechecks as to decrease the chance of job failing.
Input:
- feature_table: QIIME2 artifact of type FeatureTable[Frequency]
- rep_seqs: QIIME2 artifact of type FeatureData[Sequence]
"""
feature_table_path = save_uploaded_file(_id, feature_table)
rep_seqs_path = save_uploaded_file(_id, rep_seqs)
taxonomy_path = save_uploaded_file(_id, taxonomy)
metadata_path = save_uploaded_file(_id, metadata)
def validate_analysis_input(feature_table, rep_seqs, taxonomy):
"""
Precheck input files prior to running denoise step
Input:
- feature_table: Path to QIIME2 artifact of type FeatureTable[Frequency]
- rep_seqs: Path to QIIME2 artifact of type FeatureData[Sequence]
"""
# Check Artifact type
try:
feature_table_artifact = Artifact.load(feature_table)
rep_seqs_artifact = Artifact.load(rep_seqs)
if(str(feature_table_artifact.type) != "FeatureTable[Frequency]"):
msg = "Input Feature Table is not of type 'FeatureTable[Frequency]'!"
raise ValueError(msg)
if(str(rep_seqs_artifact.type) != "FeatureData[Sequence]"):
msg = "Input Representative Sequences is not of type 'FeatureData[Sequence]'!"
raise ValueError(msg)
except ValueError as err:
message = str(err)
return 400, message
return 200, "Imported data good!"
responseIfError(validate_analysis_input, feature_table=feature_table_path, rep_seqs=rep_seqs_path, taxonomy=taxonomy_path)
return feature_table_path, rep_seqs_path, taxonomy_path, metadata_path
| 22,876
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Deluge from a config entry."""
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
api = await hass.async_add_executor_job(
DelugeRPCClient, host, port, username, password
)
api.web_port = entry.data[CONF_WEB_PORT]
try:
await hass.async_add_executor_job(api.connect)
except (
ConnectionRefusedError,
socket.timeout,
SSLError,
) as ex:
raise ConfigEntryNotReady("Connection to Deluge Daemon failed") from ex
except Exception as ex: # pylint:disable=broad-except
if type(ex).__name__ == "BadLoginError":
raise ConfigEntryAuthFailed(
"Credentials for Deluge client are not valid"
) from ex
_LOGGER.error("Unknown error connecting to Deluge: %s", ex)
coordinator = DelugeDataUpdateCoordinator(hass, api, entry)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
| 22,877
|
def powerFactor(n):
"""Function to compute power factor given a complex power value
Will this work if we're exporting power? I think so...
"""
# Real divided by apparent
pf = n.real.__abs__() / n.__abs__()
# Determine lagging vs leading (negative).
# NOTE: cmath.phase returns counter-clockwise angle on interval [-pi, pi],
# so checking sign should be reliable for determining lead vs. lag
p = cmath.phase(n)
if p < 0:
return (pf, 'lead')
else:
return (pf, 'lag')
| 22,878
|
def send_request(url, raise_errors):
"""
Sends a request to a URL and parses the response with lxml.
"""
try:
response = requests.get(url, headers={'Accept-Language': '*'}, verify=_PEM_PATH)
response.raise_for_status()
doc = html.fromstring(response.text)
return doc
except requests.exceptions.RequestException:
if raise_errors:
raise
return None
| 22,879
|
def test_telluric_import():
"""Can we import the module?"""
skymodel = TelluricModel()
assert isinstance(skymodel, torch.nn.Module)
| 22,880
|
def _static_idx(idx, size):
"""Helper function to compute the static slice start/limit/stride values."""
assert isinstance(idx, slice)
start, stop, step = idx.indices(size)
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
| 22,881
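# Worked examples (hypothetical calls, not from the original source). A reversed slice
# such as slice(None, None, -2) over a length-5 axis maps to (start=0, stop=5, step=2)
# with the final flag signalling that the result must be reversed afterwards.
print(_static_idx(slice(None, None, -2), 5))  # -> (0, 5, 2, True)
print(_static_idx(slice(1, 4), 5))            # -> (1, 4, 1, False)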
|
def crop_array(input_array, ylength, xlength=None, orgn=(0,0)):
"""Crops an image in numpy array format. Pads crops outside
of input image with zeros if necessary. If no y dimension
is specified, outputs a square image.
"""
if xlength == None:
xlength = ylength
ylength = int(ylength)
xlength = int(xlength)
orgn = (int(orgn[0]), int(orgn[1]))
target = np.zeros((ylength, xlength))
#slice ranges
ymin = max(orgn[0], 0)
xmin = max(orgn[1], 0)
ymax = min(orgn[0] + ylength, input_array.shape[0])
xmax = min(orgn[1] + xlength, input_array.shape[1])
yslice = slice(ymin, ymax)
xslice = slice(xmin, xmax)
#top, left, bottom, right pads
tp = max(-orgn[0], 0)
lp = max(-orgn[1], 0)
bp = max((ylength + orgn[0] - tp - input_array.shape[0]), 0)
rp = max((xlength + orgn[1] - lp - input_array.shape[1]), 0)
#insert slice into the right spot.
target[tp:(ylength-bp),lp:(xlength-rp)] = input_array[yslice, xslice]
return target
| 22,882
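# Small sketch (illustrative values): crop a 2x2 window whose origin is the bottom-right
# corner of a 3x3 array, so the part falling outside the image is zero-padded.
_a = np.arange(9).reshape(3, 3)
print(crop_array(_a, 2, orgn=(2, 2)))
# [[8. 0.]
#  [0. 0.]]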
|
def get_service_defaults(servicename, version, **_):
"""
Load the default configuration for a given service version
Variables:
servicename => Name of the service to get the info
version => Version of the service to get
Data Block:
None
Result example:
{'accepts': '(archive|executable|java|android)/.*',
'category': 'Extraction',
'classpath': 'al_services.alsvc_extract.Extract',
'config': {'DEFAULT_PW_LIST': ['password', 'infected']},
'cpu_cores': 0.1,
'description': "Extracts some stuff"
'enabled': True,
'name': 'Extract',
'ram_mb': 256,
'rejects': 'empty|metadata/.*',
'stage': 'EXTRACT',
'submission_params': [{'default': u'',
'name': 'password',
'type': 'str',
'value': u''},
{'default': False,
'name': 'extract_pe_sections',
'type': 'bool',
'value': False},
{'default': False,
'name': 'continue_after_extract',
'type': 'bool',
'value': False}],
'timeout': 60}
"""
service = STORAGE.service.get(f"{servicename}_{version}", as_obj=False)
if service:
return make_api_response(service)
else:
return make_api_response("", err=f"{servicename} service does not exist", status_code=404)
| 22,883
|
def read_nnet3_model(model_path: str) -> nnet3.Nnet:
"""Read in a nnet3 model in raw format.
Actually if this model is not a raw format it will still work, but this is
not an official feature; it was due to some kaldi internal code.
Args:
model_path: Path to a raw nnet3 model, e.g., "data/final.raw"
Returns:
nnet: A neural network AM.
"""
nnet = nnet3.Nnet()
with xopen(model_path) as istream:
nnet.read(istream.stream(), istream.binary)
return nnet
| 22,884
|
def flat_list_of_lists(l):
"""flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]"""
return [item for sublist in l for item in sublist]
| 22,885
|
def cancel(api, order_ids=None):
"""
    Cancel all open orders for api["symbol"], or only the orders given in order_ids.
"""
if DETAIL:
print(cancel.__doc__, "symbol", api['symbol'], "order_ids", order_ids)
if order_ids is None:
order_ids = [] # must be a list
# format remote procedure call to exchange api standards
symbol = symbol_syntax(api["exchange"], api['symbol'])
if not order_ids:
print("Cancel All")
else:
print("Cancel Order Ids:", order_ids)
# Coinbase and Poloniex offer both Cancel All and Cancel One
if api["exchange"] in ["coinbase", "poloniex"]:
if order_ids:
# Cancel a list of orders
ret = []
for order_id in order_ids:
print("Cancel Order", order_id)
if api["exchange"] == "coinbase":
api["endpoint"] = "/orders/" + str(order_id)
api["params"] = {}
api["method"] = "DELETE"
elif api["exchange"] == "poloniex":
api["endpoint"] = "/tradingApi"
api["params"] = {"command": "cancelOrder", "orderNumber": int(order_id)}
api["method"] = "POST"
response = process_request(api)
ret.append({"order_id": order_id, "response": response})
else:
# Cancel All
if api["exchange"] == "coinbase":
api["endpoint"] = "/orders"
api["params"] = {"product_id": symbol}
api["method"] = "DELETE"
elif api["exchange"] == "poloniex":
api["endpoint"] = "/tradingApi"
api["params"] = {"command": "cancelAllOrders", "currencyPair": symbol}
api["method"] = "POST"
ret = process_request(api)
# Handle cases where "Cancel All" in one market is not supported
elif api["exchange"] in ["kraken", "binance", "bittrex", "Bitfinex"]:
if (api["exchange"] == "bitfinex") and not api["symbol"]:
print("WARN: Cancel All in ALL MARKETS")
api["endpoint"] = "/v2/auth/w/order/cancel/multi"
api["params"] = {}
api["method"] = "POST"
ret = process_request(api)
else:
# If we have an order_ids list we'll use it, else make one
if not order_ids:
print("Open Orders call to suppport Cancel All")
orders = get_orders(api)
order_ids = []
for order in orders["asks"]:
order_ids.append(order["order_id"])
for order in orders["bids"]:
order_ids.append(order["order_id"])
ret = []
for order_id in order_ids:
print("Cancel Order", order_id)
if api['exchange'] == "bitfinex":
api["endpoint"] = "/v2/auth/w/order/cancel"
api["params"] = {"id": order_id}
api["method"] = ""
elif api["exchange"] == "binance":
api["endpoint"] = "/api/v3/order"
api["params"] = {"symbol": symbol, "orderId": order_id}
api["method"] = "DELETE"
elif api["exchange"] == "bittrex":
api["endpoint"] = "/api/v1.1/market/cancel"
api["params"] = {"uuid": order_id}
api["method"] = "GET"
elif api["exchange"] == "kraken":
api["endpoint"] = "/0/private/CancelOrder"
api["params"] = {"txid": order_id}
api["method"] = "POST"
response = process_request(api)
ret.append(response)
return ret
| 22,886
|
def prev_next_group(project, group):
"""Return adjacent group objects or None for the given project and group.
The previous and next group objects are relative to sort order of the
project's groups with respect to the passed in group.
"""
# TODO: Profile and optimize this query if necessary
groups = sorted(x for x in project.groups if x.submissions)
try:
index = groups.index(group)
except ValueError:
return None, None
prev_group = groups[index - 1] if index > 0 else None
next_group = groups[index + 1] if index + 1 < len(groups) else None
return prev_group, next_group
| 22,887
|
def decrypt(mess, key):
"""Decrypt the cypher text using AES decrypt"""
if len(key) % 16 != 0:
a = 16 - len(key) % 16
key = key.ljust(len(key) + a)
cipher = AES.new(key)
plain_txt = cipher.decrypt(mess)
return plain_txt
| 22,888
|
def multiVecMat( vector, matrix ):
"""
    Multiplies the matrix by the vector from the right.
    Parameters:
    ----------
    vector: list
        The vector.
    matrix: list
        The matrix to multiply. The length of its rows must match the
        dimension of the vector.
    Returns:
    list
        A list with one entry per matrix row.
    """
    # Create the result list, one entry per matrix row
    result = [0] * len( matrix )
    # Walk through the matrix row by row
    for r, row in enumerate( matrix ):
        # If the dimensions do not match, bail out
        if len(row) != len(vector):
            return None
        # Walk through every element in the row
        for i, elem in enumerate( row ):
            # Add to the result at the current row index the product of the
            # current row element and the corresponding element of the
            # vector.
result[r] += elem * vector[i]
return result
| 22,889
|
def handleEvent(eventKey):
"""Process an incoming Kaku Event.
Retrieve the event data from the key given and call the appropriate handler.
Valid Event Types are mention, post, gather
For gather events, only the data item will be found
For mention and post, action and data will be found
Valid Event Action are create, update, delete, undelete
Event Data is a dict of items relevant to the event
"""
try:
event = json.loads(db.get(eventKey))
eventType = event['type']
if eventType == 'gather':
handleGather(event['data'])
else:
eventAction = event['action']
eventData = event['data']
logger.info('dispatching %(action)s for %(type)s' % event)
if eventType == 'post':
handlePost(eventAction, eventData)
elif eventType == 'mention':
handleMentions(eventAction, eventData)
db.expire(eventKey, 86400)
    except Exception:
logger.exception('error during event [%s]' % eventKey)
| 22,890
|
def verify_state(
state_prec_gdf,
state_abbreviation,
source,
year,
county_level_results_df,
office,
d_col=None,
r_col=None,
path=None,
):
"""
returns a complete (StateReport) object and a ((CountyReport) list) for the state.
:state_prec_gdf: (GeoDataFrame) containing precinct geometries and election results
:state_abbreviation: (str) e.g. 'MA' for Massachusetts
:source: (str) person or organization that made the 'state_prec_gdf' e.g 'VEST'
:year: (str) 'YYYY' indicating the year the election took place e.g. '2016'
:county_level_results_df: (DataFrame) containing official county-level election results
:office: (str) office to be evaluated in vote validation e.g. 'U.S. Senate'
:d_col: (str) denotes the column for democratic vote counts in each precinct
:r_col: (str) denotes the column for republican vote counts in each precinct
:path: (str) filepath to which the report should be saved (if None it won't be saved)
d_col, r_col are optional - if they are not provided, `get_party_cols` will be used
to guess based on comparing each column in state_prec_gdf to the expected results.
"""
print("Starting verification process for: ", state_abbreviation, source, year)
state_prec_gdf = state_prec_gdf.reset_index()
county_level_results_df = county_level_results_df.reset_index()
# enforce expected schema
assert "geometry" in state_prec_gdf.columns
assert {"county", "GEOID", "party", "votes"}.issubset(
set(county_level_results_df.columns)
)
# assign d_col and r_col
if not d_col or not r_col:
print("Candidate vote count columns are being assigned automatically")
d_col, r_col = get_party_cols(state_prec_gdf, state_abbreviation)
else:
print("Candidate vote count columns are being assigned manually")
print("Choose d_col as: ", d_col)
print("Choose r_col as: ", r_col)
state_prec_gdf = state_prec_gdf.rename(columns={d_col: "d_col", r_col: "r_col"})
    # remove unnecessary columns
cols_to_keep = ["d_col", "r_col", "geometry"]
if "GEOID" in state_prec_gdf.columns:
cols_to_keep.append("GEOID")
state_prec_gdf = state_prec_gdf[cols_to_keep]
print("Verification will now begin with this GeoDataFrame: \n")
print(state_prec_gdf.head())
# initialize state report
print("Starting Vote Verification")
state_report = StateReport(
county_level_results_df,
state_prec_gdf,
state_abbreviation,
year,
source,
office,
)
    # populate the report
print("Starting Topology Verification")
state_report = verify_topology(state_prec_gdf, state_report)
print("Starting County Verification")
# assign GEOID
if "GEOID" not in state_prec_gdf.columns:
try:
print("Missing GEOID Column - attempting automatic assignment")
state_prec_gdf = assign_GEOID(state_prec_gdf, state_report.fips)
print("GEOID assignment successful")
        except Exception:
pass
else:
print("Using the GEOID Column in the original shapefile.")
assert "GEOID" in state_prec_gdf.columns
state_report, county_reports = verify_counties(
state_prec_gdf, county_level_results_df, state_report
)
if path:
make_report(path, state_report, county_reports)
print("All done!\n")
return state_report, county_reports
| 22,891
|
def remove_profile(serial, profile_id):
"""hubcli doesn't remove profiles so we have to do this server-side."""
r = requests.post(
url=f"https://{ AIRWATCH_DOMAIN }/API/mdm/profiles/{ profile_id }/remove",
json={"SerialNumber": serial},
headers={
"aw-tenant-code": AIRWATCH_KEY,
"Content-Type": "application/json",
"Accept": "application/json",
},
auth=HTTPBasicAuth(AIRWATCH_USER, AIRWATCH_PASSWORD),
)
r.raise_for_status()
return r
| 22,892
|
def ProgressBar(Title, Percent, InProgress=None):
"""Displays a progress bar.
Title - Title of the progress bar. Three periods are automatically added to the end.
    Percent - A number 0-100 indicating what percent of the task is done.
Optional:
InProgress - What part of the task is currently being performed. Three periods are
automatically added to the end.
"""
    if Percent < 0 or Percent > 100:
        raise ValueError, "Percent must be between 0-100, not " + repr(Percent)
Title += "..."
print Title
_PrintTitleLine(Title)
_ProgressBar(Percent)
if InProgress:
print "Status:", InProgress+"..."
print
print
| 22,893
|
def skin_detect_percentage(image_dir=None):
"""Skin detection from image."""
result = skin_detect(image_dir)
filename = os.path.join(PROCESSED_DIR, image_dir.split('/')[-1])
if not os.path.exists(PROCESSED_DIR):
os.makedirs(PROCESSED_DIR)
cv2.imwrite(filename, result)
# take pixel values from inside contours,
# that way we get random samples as well.
grey_img = cv2.cvtColor(result, cv2.COLOR_RGB2GRAY)
greyscale_skin_nonzero_count = cv2.countNonZero(grey_img)
return float(greyscale_skin_nonzero_count)/float(grey_img.size)
| 22,894
|
def KELCH(df, n):
"""
Keltner Channel
"""
temp = (df['High'] + df['Low'] + df['Close']) / 3
KelChM = pd.Series(temp.rolling(n).mean(), name='KelChM_' + str(n))
temp = (4 * df['High'] - 2 * df['Low'] + df['Close']) / 3
KelChU = pd.Series(temp.rolling(n).mean(), name='KelChU_' + str(n))
temp = (-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3
KelChD = pd.Series(temp.rolling(n).mean(), name='KelChD_' + str(n))
result = pd.DataFrame([KelChM, KelChU, KelChD]).transpose()
return out(SETTINGS, df, result)
| 22,895
|
def find_isotopes(ms, peptides_in_bin, tolerance=0.01):
"""
    Find isotope pairs among the mass shifts using the C13-C12 mass difference.
    Parameters
    ----------
    ms : Series
        Series with mass shift labels (str) as index and mass shift values (float).
    peptides_in_bin : Series
        Series with the number of peptides in each mass shift.
    tolerance : float
        Tolerance for isotope matching.
    Returns
    -------
    DataFrame with 'isotope' (boolean) and 'monoisotop_index' columns.
"""
out = pd.DataFrame({'isotope': False, 'monoisotop_index': None}, index=ms.index)
np_ms = ms.to_numpy()
difference_matrix = np.abs(np_ms.reshape(-1, 1) - np_ms.reshape(1, -1) - DIFF_C13)
isotop, monoisotop = np.where(difference_matrix < tolerance)
    logger.debug('Found %d potential isotopes.', len(isotop))
out.iloc[isotop, 0] = True
out.iloc[isotop, 1] = out.iloc[monoisotop, :].index
for i, row in out.iterrows():
if row['isotope']:
if peptides_in_bin[i] > peptides_in_bin[row['monoisotop_index']]:
out.at[i, 'isotope'], out.at[i, 'monoisotop_index'] = False, None
return out
| 22,896
|
def wait_until_complete(jobs):
"""wait jobs finish"""
return [j.get() for j in jobs]
| 22,897
|
def torquery(url):
"""
Uses pycurl to fetch a site using the proxy on the SOCKS_PORT.
"""
output = io.BytesIO()
query = pycurl.Curl()
query.setopt(pycurl.URL, url)
query.setopt(pycurl.PROXY, 'localhost')
query.setopt(pycurl.PROXYPORT, SOCKS_PORT)
query.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
query.setopt(pycurl.WRITEFUNCTION, output.write)
try:
query.perform()
return output.getvalue()
except pycurl.error as exc:
return "Unable to reach %s (%s)" % (url, exc)
| 22,898
|
def check_for_negative_residual(vel, data, errors, best_fit_list, dct,
signal_ranges=None, signal_mask=None,
force_accept=False, get_count=False,
get_idx=False, noise_spike_mask=None):
"""Check for negative residual features and try to refit them.
We define negative residual features as negative peaks in the residual that were introduced by the fit. These negative peaks have to have a minimum negative signal-to-noise ratio of dct['snr_negative'].
In case of a negative residual feature, we try to replace the Gaussian fit component that is causing the feature with two narrower components. We only accept this solution if it yields a better fit as determined by the AICc value.
Parameters
----------
vel : numpy.ndarray
Velocity channels (unitless).
data : numpy.ndarray
Original data of spectrum.
errors : numpy.ndarray
Root-mean-square noise values.
best_fit_list : list
List containing parameters of the current best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
dct : dict
Dictionary containing parameter settings for the improved fitting.
signal_ranges : list
Nested list containing info about ranges of the spectrum that were estimated to contain signal. The goodness-of-fit calculations are only performed for the spectral channels within these ranges.
signal_mask : numpy.ndarray
Boolean array containing the information of signal_ranges.
force_accept : bool
Experimental feature. Default is 'False'. If set to 'True', the new fit will be forced to become the best fit.
get_count : bool
Default is 'False'. If set to 'True', only the number of occurring negative residual features will be returned.
get_idx : bool
Default is 'False'. If set to 'True', the index of the Gaussian fit component causing the negative residual feature is returned. In case of multiple negative residual features, only the index of one of them is returned.
Returns
-------
best_fit_list : list
List containing parameters of the chosen best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
"""
params_fit = best_fit_list[0]
ncomps_fit = best_fit_list[2]
# in case a single rms value is given instead of an array
if not isinstance(errors, np.ndarray):
errors = np.ones(len(data)) * errors
if ncomps_fit == 0:
if get_count:
return 0
return best_fit_list
residual = best_fit_list[4]
amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
amp_guesses, fwhm_guesses, offset_guesses = get_initial_guesses(
residual, errors[0], dct['snr_negative'], dct['significance'],
peak='negative')
# check if negative residual feature was already present in the data
remove_indices = []
for i, offset in enumerate(offset_guesses):
if residual[offset] > (data[offset] - dct['snr']*errors[0]):
remove_indices.append(i)
if len(remove_indices) > 0:
amp_guesses, fwhm_guesses, offset_guesses = remove_components_from_sublists(
[amp_guesses, fwhm_guesses, offset_guesses], remove_indices)
if get_count:
return (len(amp_guesses))
if len(amp_guesses) == 0:
return best_fit_list
# in case of multiple negative residual features, sort them in order of increasing amplitude values
sort = np.argsort(amp_guesses)
amp_guesses = np.array(amp_guesses)[sort]
fwhm_guesses = np.array(fwhm_guesses)[sort]
offset_guesses = np.array(offset_guesses)[sort]
for amp, fwhm, offset in zip(amp_guesses, fwhm_guesses, offset_guesses):
idx_low = max(0, int(offset - fwhm))
idx_upp = int(offset + fwhm) + 2
exclude_idx = check_which_gaussian_contains_feature(
idx_low, idx_upp, fwhms_fit, offsets_fit)
if get_idx:
return exclude_idx
if exclude_idx is None:
continue
params_fit = replace_gaussian_with_two_new_ones(
data, vel, errors[0], dct['snr'], dct['significance'],
params_fit, exclude_idx, offset)
best_fit_list = get_best_fit(
vel, data, errors, params_fit, dct, first=False,
best_fit_list=best_fit_list, signal_ranges=signal_ranges,
signal_mask=signal_mask, force_accept=force_accept,
noise_spike_mask=noise_spike_mask)
params_fit = best_fit_list[0]
ncomps_fit = best_fit_list[2]
amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
return best_fit_list
| 22,899
|