| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
from typing import Dict
import numpy as np
from scipy.special import comb  # scipy's comb accepts the float counts yielded by np.nditer
def rand_index(pred_cluster: Dict, target_cluster: Dict) -> float:
"""Use contingency_table to get RI directly
    RI = Accuracy = (TP+TN)/(TP+TN+FP+FN)
Args:
pred_cluster: Dict element:cluster_id (cluster_id from 0 to max_size)| predicted clusters
target_cluster: Dict element:cluster_id (cluster_id from 0 to max_size) | target clusters
Return:
RI (float)
"""
pred_cluster_ = helper_trans_to_element2clusterid(pred_cluster)
target_cluster_ = helper_trans_to_element2clusterid(target_cluster)
pred_cluster_size = len(pred_cluster_)
target_cluster_size = len(target_cluster_)
contingency_table = np.zeros((pred_cluster_size,target_cluster_size))
for i, p_cluster in enumerate(pred_cluster_):
for j, t_cluster in enumerate(target_cluster_):
#find common element
l = [*p_cluster,*t_cluster]
contingency_table[i][j] = len(l) - len(set(l))
s = comb(np.sum(contingency_table), 2)
a = 0
for i in np.nditer(contingency_table):
a += comb(i,2)
return a/s
|
19ccbd6708abe6b3a05dc23843fa21e0f6d804e9
| 3,648,600
|
import re
import time
import datetime
def _strToDateTimeAndStamp(incoming_v, timezone_required=False):
"""Test (and convert) datetime and date timestamp values.
@param incoming_v: the literal string defined as the date and time
@param timezone_required: whether the timezone is required (ie, for date timestamp) or not
@return datetime
@rtype: datetime.datetime
@raise ValueError: invalid datetime or date timestamp
"""
# First, handle the timezone portion, if there is any
(v, tzone) = _returnTimeZone(incoming_v)
# Check on the timezone. For time date stamp object it is required
if timezone_required and tzone is None:
raise ValueError("Invalid datetime %s" % incoming_v)
# The microseconds should be handled here...
final_v = v
milliseconds = 0
    milpattern = r"(.*)(\.)([0-9]*)"
match = re.match(milpattern, v)
if match is not None:
# we have a millisecond portion...
try:
final_v = match.groups()[0]
milliseconds = int(match.groups()[2])
except:
raise ValueError("Invalid datetime %s" % incoming_v)
#
# By now, the pattern should be clear
# This may raise an exception...
try:
tstr = time.strptime(final_v, "%Y-%m-%dT%H:%M:%S")
if tzone is not None:
return datetime.datetime(tstr.tm_year, tstr.tm_mon, tstr.tm_mday, tstr.tm_hour, tstr.tm_min, tstr.tm_sec,
milliseconds, tzone)
else:
return datetime.datetime(tstr.tm_year, tstr.tm_mon, tstr.tm_mday, tstr.tm_hour, tstr.tm_min, tstr.tm_sec,
milliseconds)
except:
raise ValueError("Invalid datetime %s" % incoming_v)
|
fa0362976c3362e32c4176b4bd4c84ae0c653080
| 3,648,601
|
def get_price_for_market_stateless(result):
"""Returns the price for the symbols that the API doesnt follow the market state (ETF, Index)"""
## It seems that for ETF symbols it uses REGULAR market fields
return {
"current": result['regularMarketPrice']['fmt'],
"previous": result['regularMarketPreviousClose']['fmt'],
"change": result['regularMarketChange']['fmt'],
"percent": result['regularMarketChangePercent']['fmt']
}
|
6afb9d443f246bd0db5c320a41c8341953f5dd7a
| 3,648,602
|
def make_sure_not_modified(arg):
""" Function checking whether annotation of SomeList is never resized
and never modified, useful for debugging. Does nothing when run directly
"""
return arg
|
ef86ed28a7e5ebdac27b7cee85fddec669604798
| 3,648,603
|
def jump(current_command):
"""Return Jump Mnemonic of current C-Command"""
#jump exists after ; if ; in string. Always the last part of the command
if ";" in current_command:
command_list = current_command.split(";")
return command_list[-1]
else:
return ""
|
2530ae99fcc4864c5e529d783b687bfc00d58156
| 3,648,604
|
def get_veterans(uname=None):
"""
    @purpose: Runs SQL commands to query the database for information on veterans.
@args: The username of the veteran. None if the username is not provided.
@returns: A list with one or more veterans.
"""
vet = None
if uname:
command = "SELECT * FROM veterans WHERE username = '{}' ".format(uname)
else:
command = "SELECT * FROM veterans"
with sql.connect(DATABASE) as con:
cur = con.cursor()
cur.execute(command)
if uname:
vet = cur.fetchone()
else:
vet = cur.fetchall()
cur.close()
if vet is not None and len(vet) > 10:
return vet[0:10]
else:
return vet
|
df97dee334332613b52c745c3f20c4509c0e0cb9
| 3,648,605
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
def multiline(xs, ys, c=None, ax=None, **kwargs):
"""
Plot lines with different colorings
Adapted from: https://stackoverflow.com/a/50029441/2565317
Parameters
----------
xs : iterable container of x coordinates
ys : iterable container of y coordinates
c : iterable container of numbers mapped to colormap
ax (optional): Axes to plot on.
kwargs (optional): passed to LineCollection
EXAMPLE:
xs = [[0, 1],
[0, 1, 2]]
ys = [[0, 0],
[1, 2, 1]]
c = [0, 1]
lc = multiline(xs, ys, c, cmap='bwr', lw=2)
Notes:
len(xs) == len(ys) == len(c) is the number of line segments
len(xs[i]) == len(ys[i]) is the number of points for each line (indexed by i)
Returns
-------
lc : LineCollection instance.
"""
# find axes
ax = plt.gca() if ax is None else ax
n = len(xs)
if c is None:
c = np.linspace(0, 1, n)
# create LineCollection
segments = [np.column_stack([x, y]) for x, y in zip(xs, ys)]
lc = LineCollection(segments, **kwargs)
# set coloring of line segments
# Note: I get an error if I pass c as a list here... not sure why.
lc.set_array(np.asarray(c))
# add lines to axes and rescale
    # Note: adding a collection doesn't autoscale xlim/ylim
ax.add_collection(lc)
ax.autoscale()
return lc
|
d91c60faf89422f7d6659357946e816d87dd6ef8
| 3,648,606
|
def mock_movement_handler() -> AsyncMock:
"""Get an asynchronous mock in the shape of an MovementHandler."""
return AsyncMock(spec=MovementHandler)
|
85579588dc5d8e6cb37bc85bc652f70d3fca8022
| 3,648,607
|
def compute( op , x , y ):
"""Compute the value of expression 'x op y', where -x and y
are two integers and op is an operator in '+','-','*','/'"""
if (op=='+'):
return x+y
elif op=='-':
return x-y
elif op=='*':
return x*y
elif op=='/':
return x/y
else:
return 0
|
dbdf73a91bdb7092d2a18b6245ce6b8d75b5ab33
| 3,648,608
|
def list_arg(raw_value):
"""argparse type for a list of strings"""
return str(raw_value).split(',')
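# A minimal usage sketch of list_arg as an argparse type
# (the '--tags' option name is illustrative, not from the source):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--tags', type=list_arg, default=[])
print(parser.parse_args(['--tags', 'a,b,c']).tags)  # ['a', 'b', 'c']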
|
24adb555037850e8458cde575ed360265a20cea5
| 3,648,609
|
def create_tracking(slug, tracking_number):
"""Create tracking, return tracking ID
"""
tracking = {'slug': slug, 'tracking_number': tracking_number}
result = aftership.tracking.create_tracking(tracking=tracking, timeout=10)
return result['tracking']['id']
|
4f5d645654604787892f1373759e5d40ce01b2fe
| 3,648,610
|
def get_indentation(line_):
"""
returns the number of preceding spaces
"""
return len(line_) - len(line_.lstrip())
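# Worked examples: four leading spaces yield 4, no indentation yields 0.
assert get_indentation("    return x") == 4
assert get_indentation("no_indent") == 0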
|
23a65ba620afa3268d4ab364f64713257824340d
| 3,648,611
|
from itertools import count
def main():
    """
    Find the 10001st prime.
    :return: the 10001st prime number
    """
primes = {2, }
for x in count(3, 2):
if prime(x):
primes.add(x)
if len(primes) >= 10001:
break
return sorted(primes)[-1]
|
3d4f492fe3d0d7e4991003020694434145cd5983
| 3,648,612
|
import re
def findDataById(objectids, level=None, version=None):
"""Return xml list of urls for each objectid."""
if sciflo.utils.isXml(objectids):
et, xmlNs = sciflo.utils.getXmlEtree(objectids)
objectids = et.xpath('.//_default:objectid/text()', xmlNs)
infoLoL = []
headerLoL = ['objectid', ['urls', 'url']]
if len(objectids) == 0: return sciflo.utils.list2Xml(infoLoL, headerLoL, 'resultSet', 'result')
datasetDict = {}
for regex in OBJECTIDREGEX_TO_DATASET_MAP.keys():
datasetDict[OBJECTIDREGEX_TO_DATASET_MAP[regex]] = []
for objectid in objectids:
found = False
for regex in OBJECTIDREGEX_TO_DATASET_MAP.keys():
if re.search(regex, objectid):
datasetDict[OBJECTIDREGEX_TO_DATASET_MAP[regex]].append(objectid)
found = True
break
if not found:
raise RuntimeError("Failed to match objectid %s to a dataset." % objectid)
datasetsToDo = [dataset for dataset in datasetDict.keys() if len(datasetDict[dataset]) > 0]
if len(datasetsToDo) > 1:
raise NotImplementedError("Multiple dataset handling not yet implemented.")
getDataByUrlFunc = eval(DATASET_TO_FUNC_MAP[datasetsToDo[0]])
urlDict = getDataByUrlFunc(datasetDict[datasetsToDo[0]], level=level, version=version)
objids = datasetDict[datasetsToDo[0]]
for objid in objectids:
urls = urlDict.get(objid, [])
infoLoL.append([objid, urls])
return sciflo.utils.list2Xml(infoLoL, headerLoL, 'resultSet', 'result')
|
85fdfcbd0e3733981e3ecfaf039abb5a7630cf35
| 3,648,613
|
import argparse
def node_parameter_parser(s):
"""Expects arguments as (address,range,probability)"""
try:
vals = s.split(",")
address = int(vals[0])
range = float(vals[1])
probability = float(vals[2])
return address, range, probability
except:
raise argparse.ArgumentTypeError("Node parameters must be address,range,probability")
|
a1d378d5f71b53fb187a920f71d7fc3373e775df
| 3,648,614
|
def launch(context, service_id, catalog_packages=""):
""" Initialize the module. """
return EnvManager(context=context, service_id=service_id,
catalog_packages=catalog_packages)
|
ba22d106efca9014d118daf4a8880bbcfe0c11fa
| 3,648,615
|
def handle_verification_token(request, token) -> [404, redirect]:
"""
This is just a reimplementation of what was used previously with OTC
https://github.com/EuroPython/epcon/pull/809/files
"""
token = get_object_or_404(Token, token=token)
logout(request)
user = token.user
user.is_active = True
user.save()
user = authenticate(uid=user.id)
login(request, user)
token.delete()
    messages.success(request, 'Email verification complete')
return redirect('user_panel:dashboard')
|
f369dc743d875bee09afb6e2ca4a2c313695bcbc
| 3,648,616
|
import tensorflow as tf
def multi_perspective_expand_for_2d(in_tensor, weights):
"""Given a 2d input tensor and weights of the appropriate shape,
weight the input tensor by the weights by multiplying them
together.
"""
# Shape: (num_sentence_words, 1, rnn_hidden_dim)
in_tensor_expanded = tf.expand_dims(in_tensor, axis=1)
# Shape: (1, multi_perspective_dims, rnn_hidden_dim)
weights_expanded = tf.expand_dims(weights, axis=0)
# Shape: (num_sentence_words, multi_perspective_dims, rnn_hidden_dim)
return tf.multiply(in_tensor_expanded, weights_expanded)
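# Shape sketch (sizes are illustrative only):
#   in_tensor: (num_sentence_words, rnn_hidden_dim)                          e.g. (5, 300)
#   weights:   (multi_perspective_dims, rnn_hidden_dim)                      e.g. (20, 300)
#   result:    (num_sentence_words, multi_perspective_dims, rnn_hidden_dim)  e.g. (5, 20, 300)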
|
3d153e0bd9808dcd080f3d0ba75ee3bdd16123d3
| 3,648,617
|
import mimetypes
import os
import uuid
def get_temp_url(src, *args):
"""
    Caches `src` as a file in the images/temp folder and returns a link to the data.
    The generated URL is meant for single use: after it is accessed, the data is deleted from the machine.
    Arguments:
    src: `bytes` | `str` - If its type is bytes, it is the data to be cached; if its type is str, it is a path to a file in `images/static`.
    args[0] (mimetype): `str` - The type of the data. Supported types are defined by the `mimetypes` built-in module. Only needs to be provided when creating a file from in-memory data.
Returns: `str` - URL to the data.
"""
if isinstance(src, bytes):
extension = mimetypes.guess_extension(args[0])
assert extension is not None, f"Unknown file format: {args[0]}"
data = src
elif isinstance(src, str):
extension = "." + src.split(".", maxsplit=1)[1]
src = os.path.join("images", "static", src)
data = open(src, "rb").read()
filename = uuid.uuid4().hex + extension
open(os.path.join("images", "temp", filename), "wb").write(data)
return "/".join(
[os.environ["HOST_URL"], os.environ["BOT_TOKEN"], "dynamic", filename]
)
|
f20d2625df13be848a0edc046b62533092d55578
| 3,648,618
|
import base64
def generateBasicAuthHeader(username, password):
"""
Generates a basic auth header
:param username: Username of user
:type username: str
:param password: Password of user
:type password: str
:return: Dict containing basic auth header
:rtype: dict
>>> generateBasicAuthHeader('test','test')
{'Authorization': 'Basic dGVzdDp0ZXN0'}
"""
    base64string = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8')).decode('ascii')
return {'Authorization': 'Basic %s' % base64string}
|
835b3541212e05354a5573a5b35e8184231c7a6c
| 3,648,619
|
import numpy as np
import matplotlib.pyplot as plt
def correlation(df, target, limit=0, figsize=None, plot=True):
"""
Display Pearson correlation coefficient between target and numerical features
Return a list with low-correlated features if limit is provided
"""
numerical = list(df.select_dtypes(include=[np.number]))
numerical_f = [n for n in numerical if n not in target]
if not numerical_f:
print("There are no numerical features")
return
copy_df = df.copy()
for t in target:
if t not in numerical:
copy_df[t] = copy_df[t].astype(np.float16)
corr = copy_df.corr().loc[numerical_f, target].fillna(0).sort_values(target, ascending=False).round(2)
if not figsize:
figsize = (8, len(numerical_f) // 2 + 1)
corr.plot.barh(figsize=figsize)
plt.gca().invert_yaxis()
if limit>0:
plt.axvline(x=-limit, color='k', linestyle='--', )
plt.axvline(x=limit, color='k', linestyle='--', )
plt.xlabel('Pearson correlation coefficient')
plt.ylabel('feature')
if limit:
return corr.loc[abs(corr[target[0]]) < abs(limit)].index.tolist()
|
044f4708ad691ad4d275c58ff6dbd5a57a6a978d
| 3,648,620
|
def fasta_to_raw_observations(raw_lines):
"""
Assume that the first line is the header.
@param raw_lines: lines of a fasta file with a single sequence
@return: a single line string
"""
lines = list(gen_nonempty_stripped(raw_lines))
if not lines[0].startswith('>'):
msg = 'expected the first line to start with ">"'
raise ValueError(msg)
data_lines = lines[1:]
return ''.join(data_lines)
|
e75bd1f08ab68fa5a2a0d45cb23cba087e078d30
| 3,648,621
|
import numpy as np
def pc_proj(data, pc, k):
    """
    Project the data onto principal component k (the cosine similarity
    between each sample and pc[k]).
    """
return np.dot(data, pc[k].T) / (np.sqrt(np.sum(data**2, axis=1)) * np.sqrt(np.sum(pc[k]**2)))
|
768a4a9eba6427b9afda8c34326c140b360feec3
| 3,648,622
|
import datetime
def compare_time(time_str):
""" Compare timestamp at various hours """
t_format = "%Y-%m-%d %H:%M:%S"
if datetime.datetime.now() - datetime.timedelta(hours=3) <= \
datetime.datetime.strptime(time_str, t_format):
return 3
elif datetime.datetime.now() - datetime.timedelta(hours=6) <= \
datetime.datetime.strptime(time_str, t_format):
return 6
elif datetime.datetime.now() - datetime.timedelta(hours=12) <= \
datetime.datetime.strptime(time_str, t_format):
return 12
elif datetime.datetime.now() - datetime.timedelta(hours=24) <= \
datetime.datetime.strptime(time_str, t_format):
return 24
# Else catch all
return 100
|
b3d6d85e4559fa34f412ee81825e4f1214122534
| 3,648,623
|
import numpy as np
import matplotlib.pyplot as plt
def trendline(xd, yd, order=1, c='r', alpha=1, Rval=True):
"""Make a line of best fit,
Set Rval=False to print the R^2 value on the plot"""
#Only be sure you are using valid input (not NaN)
idx = np.isfinite(xd) & np.isfinite(yd)
#Calculate trendline
coeffs = np.polyfit(xd[idx], yd[idx], order)
intercept = coeffs[-1]
slope = coeffs[-2]
power = coeffs[0] if order == 2 else 0
minxd = np.min(xd)
maxxd = np.max(xd)
xl = np.array([minxd, maxxd])
yl = power * xl ** 2 + slope * xl + intercept
#Plot trendline
plt.plot(xl, yl, c, alpha=alpha)
#Calculate R Squared
p = np.poly1d(coeffs)
ybar = np.sum(yd) / len(yd)
ssreg = np.sum((p(xd) - ybar) ** 2)
sstot = np.sum((yd - ybar) ** 2)
Rsqr = ssreg / sstot
if not Rval:
#Plot R^2 value
plt.text(0.8 * maxxd + 0.2 * minxd, 0.8 * np.max(yd) + 0.2 * np.min(yd),
'$R^2 = %0.2f$' % Rsqr)
else:
#Return the R^2 value:
return Rsqr
|
af10643b0d74fd5f7a82f803bcef0bd9e379f086
| 3,648,624
|
import collections
def groupby(key, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
Non-callable keys imply grouping on a member.
>>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
... {'name': 'Bob', 'gender': 'M'},
... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
{'F': [{'gender': 'F', 'name': 'Alice'}],
'M': [{'gender': 'M', 'name': 'Bob'},
{'gender': 'M', 'name': 'Charlie'}]}
See Also:
countby
"""
if not callable(key):
key = getter(key)
d = collections.defaultdict(lambda: [].append)
for item in seq:
d[key(item)](item)
rv = {}
for k, v in iteritems(d):
rv[k] = v.__self__
return rv
|
bfbec3f25d1d44c9ff2568045508efbf2a2216d2
| 3,648,625
|
import numpy as np
import xarray as xr
from tempfile import NamedTemporaryFile
def evo():
"""Creates a test evolution xarray file."""
nevo = 20
gen_data = {1: np.arange(nevo),
2: np.sin(np.linspace(0, 2*np.pi, nevo)),
3: np.arange(nevo)**2}
data = {'X1': np.linspace(0.1, 1.7, nevo)*_unit_conversion['AU'],
'X2': np.deg2rad(np.linspace(60, 120, nevo)),
'X3': np.deg2rad(np.linspace(30, 80, nevo)),
'TIME': np.arange(nevo)*60*60*24,
'DT': np.arange(nevo),
'NSTEP': np.arange(nevo),
'D': scale_variable(gen_data[3], 'den')/_unit_conversion['den'],
'T': scale_variable(gen_data[2], 'temp'),
'V1': scale_variable(gen_data[1], 'vel')/_unit_conversion['vel'],
'V2': scale_variable(gen_data[2], 'vel')/_unit_conversion['vel'],
'V3': scale_variable(gen_data[3], 'vel')/_unit_conversion['vel'],
'B1': scale_variable(gen_data[1], 'mag'),
'B2': scale_variable(gen_data[2], 'mag'),
'B3': scale_variable(gen_data[3], 'mag'),
'DP': np.linspace(0, 0.1, nevo),
'BP': np.linspace(-1, 1, nevo)}
# Need to make data Arrays for all of the variables with the single dim
for x in data:
data[x] = xr.DataArray(data[x], dims=['nevo'])
ds = xr.Dataset(data, coords={'nevo': np.arange(nevo)})
ds.attrs = {'label': 'earth',
'rundate_cal': "2010-01-01T00"}
with NamedTemporaryFile(suffix='.nc') as f:
ds.to_netcdf(f.name)
evo = read_evo(f.name)
return evo
|
5873a3ee7a66d338a8df6b8cf6d26cf4cfeb41a3
| 3,648,626
|
from typing import List
from typing import Any
from typing import Optional
def jinja_calc_buffer(fields: List[Any], category: Optional[str] = None) -> int:
"""calculate buffer for list of fields based on their length"""
if category:
fields = [f for f in fields if f.category == category]
return max(len(f.to_string()) for f in fields)
|
c1f619acd8f68a9485026b344ece0c162c6f0fb0
| 3,648,627
|
def get_delete_op(op_name):
""" Determine if we are dealing with a deletion operation.
Normally we just do the logic in the last return. However, we may want
special behavior for some types.
:param op_name: ctx.operation.name.split('.')[-1].
:return: bool
"""
return 'delete' == op_name
|
508a9aad3ac6f4d58f5890c1abc138326747ee51
| 3,648,628
|
import json
def label(img_id):
"""
GET: return the current label for <img_id>
    PUT: update the current label for <img_id>
"""
if request.method == 'PUT':
#TODO: figure out how to get `request` to properly parse json on PUT
req_dict = json.loads(request.data.decode())
with connection.cursor() as cursor:
label_id = req_dict['label_id']
sql = "update {} set label_id={} where id={}".format(table, label_id, img_id)
cursor.execute(sql)
app.logger.info("updated id={} to label_id={}".format(img_id, label_id))
return jsonify(status='ok')
else:
with connection.cursor() as cursor:
sql = "select * from {} where id={}".format(table, img_id)
cursor.execute(sql)
app.logger.info("queried for id={}".format(img_id))
result = cursor.fetchone()
return jsonify(result)
|
c5e543fec0033f5888d99b7dce9f84faed70ff76
| 3,648,629
|
import numpy as np
def random_radec(nsynths, ra_lim=[0, 360], dec_lim=[-90, 90],
random_state=None, **kwargs):
"""
Generate random ra and dec points within a specified range.
All angles in degrees.
Parameters
----------
nsynths : int
Number of random points to generate.
ra_lim : list-like, optional
ra limits.
dec_lim : list-like, optional
dec limits.
random_state : `None`, int, list of ints, or `numpy.random.RandomState`
If ``seed`` is `None`, return the `~numpy.random.RandomState`
singleton used by ``numpy.random``. If ``seed`` is an `int`,
return a new `~numpy.random.RandomState` instance seeded with
``seed``. If ``seed`` is already a `~numpy.random.RandomState`,
return it. Otherwise raise ``ValueError``.
Returns
-------
points : 2d ndarray
Random ra and dec points in degrees.
"""
rng = check_random_state(random_state)
ra_lim = np.deg2rad(np.asarray(ra_lim))
dec_lim = np.deg2rad(np.asarray(dec_lim))
zlim = np.sin(dec_lim)
z = zlim[0] + zlim.ptp() * rng.uniform(size=int(nsynths))
ra = ra_lim[0] + ra_lim.ptp() * rng.uniform(size=int(nsynths))
dec = np.arcsin(z)
ra, dec = np.rad2deg(ra), np.rad2deg(dec)
points = np.array([ra, dec]).T
return points
|
4649d5f8e42ada28ba45c46fac1174fc66976f16
| 3,648,630
|
def warmUp():
"""
Warm up the machine in AppEngine a few minutes before the daily standup
"""
return "ok"
|
f7c83939d224b06db26570ab8ccc8f04bd69c1d6
| 3,648,631
|
def setToC(doc, toc, collapse=1):
"""Create new outline tree (table of contents, TOC).
Args:
toc: (list, tuple) each entry must contain level, title, page and
optionally top margin on the page. None or '()' remove the TOC.
collapse: (int) collapses entries beyond this level. Zero or None
shows all entries unfolded.
Returns:
the number of inserted items, or the number of removed items respectively.
"""
if doc.isClosed or doc.isEncrypted:
raise ValueError("document closed or encrypted")
if not doc.isPDF:
raise ValueError("not a PDF")
if not toc: # remove all entries
return len(doc._delToC())
# validity checks --------------------------------------------------------
if type(toc) not in (list, tuple):
raise ValueError("'toc' must be list or tuple")
toclen = len(toc)
pageCount = doc.pageCount
t0 = toc[0]
if type(t0) not in (list, tuple):
raise ValueError("items must be sequences of 3 or 4 items")
if t0[0] != 1:
raise ValueError("hierarchy level of item 0 must be 1")
for i in list(range(toclen - 1)):
t1 = toc[i]
t2 = toc[i + 1]
if not -1 <= t1[2] <= pageCount:
raise ValueError("row %i: page number out of range" % i)
if (type(t2) not in (list, tuple)) or len(t2) not in (3, 4):
raise ValueError("bad row %i" % (i + 1))
if (type(t2[0]) is not int) or t2[0] < 1:
raise ValueError("bad hierarchy level in row %i" % (i + 1))
if t2[0] > t1[0] + 1:
raise ValueError("bad hierarchy level in row %i" % (i + 1))
# no formal errors in toc --------------------------------------------------
# --------------------------------------------------------------------------
# make a list of xref numbers, which we can use for our TOC entries
# --------------------------------------------------------------------------
old_xrefs = doc._delToC() # del old outlines, get their xref numbers
old_xrefs = [] # TODO do not reuse them currently
# prepare table of xrefs for new bookmarks
xref = [0] + old_xrefs
xref[0] = doc._getOLRootNumber() # entry zero is outline root xref#
if toclen > len(old_xrefs): # too few old xrefs?
for i in range((toclen - len(old_xrefs))):
xref.append(doc._getNewXref()) # acquire new ones
lvltab = {0: 0} # to store last entry per hierarchy level
# ------------------------------------------------------------------------------
# contains new outline objects as strings - first one is the outline root
# ------------------------------------------------------------------------------
olitems = [{"count": 0, "first": -1, "last": -1, "xref": xref[0]}]
# ------------------------------------------------------------------------------
    # build olitems as a list of PDF-like connected dictionaries
# ------------------------------------------------------------------------------
for i in range(toclen):
o = toc[i]
lvl = o[0] # level
title = getPDFstr(o[1]) # title
pno = min(doc.pageCount - 1, max(0, o[2] - 1)) # page number
page = doc[pno] # load the page
ictm = ~page._getTransformation() # get inverse transformation matrix
top = Point(72, 36) * ictm # default top location
dest_dict = {"to": top, "kind": LINK_GOTO} # fall back target
if o[2] < 0:
dest_dict["kind"] = LINK_NONE
if len(o) > 3: # some target is specified
if type(o[3]) in (int, float): # convert a number to a point
dest_dict["to"] = Point(72, o[3]) * ictm
else: # if something else, make sure we have a dict
dest_dict = o[3] if type(o[3]) is dict else dest_dict
if "to" not in dest_dict: # target point not in dict?
dest_dict["to"] = top # put default in
else: # transform target to PDF coordinates
point = dest_dict["to"] * ictm
dest_dict["to"] = point
d = {}
d["first"] = -1
d["count"] = 0
d["last"] = -1
d["prev"] = -1
d["next"] = -1
d["dest"] = getDestStr(page.xref, dest_dict)
d["top"] = dest_dict["to"]
d["title"] = title
d["parent"] = lvltab[lvl - 1]
d["xref"] = xref[i + 1]
lvltab[lvl] = i + 1
parent = olitems[lvltab[lvl - 1]] # the parent entry
if collapse and lvl > collapse: # suppress expansion
parent["count"] -= 1 # make /Count negative
else:
parent["count"] += 1 # positive /Count
if parent["first"] == -1:
parent["first"] = i + 1
parent["last"] = i + 1
else:
d["prev"] = parent["last"]
prev = olitems[parent["last"]]
prev["next"] = i + 1
parent["last"] = i + 1
olitems.append(d)
# ------------------------------------------------------------------------------
# now create each outline item as a string and insert it in the PDF
# ------------------------------------------------------------------------------
for i, ol in enumerate(olitems):
txt = "<<"
if ol["count"] != 0:
txt += "/Count %i" % ol["count"]
try:
txt += ol["dest"]
except:
pass
try:
if ol["first"] > -1:
txt += "/First %i 0 R" % xref[ol["first"]]
except:
pass
try:
if ol["last"] > -1:
txt += "/Last %i 0 R" % xref[ol["last"]]
except:
pass
try:
if ol["next"] > -1:
txt += "/Next %i 0 R" % xref[ol["next"]]
except:
pass
try:
if ol["parent"] > -1:
txt += "/Parent %i 0 R" % xref[ol["parent"]]
except:
pass
try:
if ol["prev"] > -1:
txt += "/Prev %i 0 R" % xref[ol["prev"]]
except:
pass
try:
txt += "/Title" + ol["title"]
except:
pass
if i == 0: # special: this is the outline root
txt += "/Type/Outlines" # so add the /Type entry
txt += ">>"
doc._updateObject(xref[i], txt) # insert the PDF object
doc.initData()
return toclen
|
a07648574b1bea850b4739c0e55505bbb2efe139
| 3,648,632
|
def _map_dvector_permutation(rd,d,eps):
"""Maps the basis vectors to a permutation.
Args:
rd (array-like): 2D array of the rotated basis vectors.
d (array-like): 2D array of the original basis vectors.
eps (float): Finite precision tolerance.
Returns:
RP (list): The permutation of the basis vectors.
"""
n_d = len(rd) # of d-vectors
found = [False]*n_d
RP = []
for iD in range(n_d):
for jD in range(n_d):
if found[jD]:
continue
if np.allclose(rd[iD],d[jD],atol=eps,rtol=0):
RP.append(jD)
found[jD] = True
break
if len(RP) != len(d): #pragma: no cover
print("d-vector didn't permute in map_dvector_permutation "
"This usually means that the d-set from the input structure and the d-set"
" from the struct_enum.out have a different origin or don't live in the same"
" unit cell. This probably isn't your fault---the code should overcome this."
,RP)
exit()
return(RP)
|
3060cb093d769059f2af0635aafc0bd0fe94ad86
| 3,648,633
|
from astropy.time import Time
import os
import time
import glob
from datetime import datetime
import re
from io import StringIO
import shutil
from re import DEBUG
import subprocess
def dump_lightcurves_with_grcollect(photfileglob, lcdir, maxmemory,
objectidcol=3,
lcextension='grcollectilc',
observatory='tess'):
"""
Given a list of photometry files (text files at various times output by
fiphot with rows that are objects), make lightcurve files (text files for
various objects whose rows are times).
(For TESS, the timestamp imprinted here is JD_UTC midtime.)
This routine uses the `fitsh` routine `grcollect` to do the dumping. This
is comparably fast to the postgresql indexing approach without heavy
optimization (dumps ~1 photometry file per second).
An important intermediate step, implemented here, is prepending times and
filenames to *.iphot lightcurve files, to make *.iphottemp files.
*.iphot photometry files look like:
HAT-381-0000004 1482.093 521.080 1482.10899 521.079941
1.86192159 -0.217043415 -0.152707564 0.35 0.41
583942.10 334.31 5.54226 0.00062 G
605285.46 340.12 5.50328 0.00061 G
619455.38 344.29 5.47816 0.00060 G
Args:
photfileglob (str): bash glob to pass to grcollect, e.g., the first
input line below
grcollect /home/mypath/rsub-3f62ef9f-tess201913112*.iphot \
--col-base 1 --prefix /nfs/phtess1/ar1/TESS/SIMFFI/LC/ISP_1-1/ \
--extension grcollectilc --max-memory 4g
objectidcol (int): column of object id in *.iphottemp files (default:
3)
lcdir (str): directory where lightcurves get dumped
maxmemory (str): e.g., "2g", "100m" for 2gb, or 100mb. Maximum amount
of memory for `grcollect`. See https://fitsh.net/wiki/man/grcollect for
terse details.
Keyword args:
lcextension (str): e.g., "grcollectilc" for the extension you want your
dumped lightcurves to have. Also common is "ilc" for "image-subtracted
lightcurve".
Returns:
diddly-squat.
"""
if not os.path.exists(lcdir):
os.mkdir(lcdir)
starttime = time.time()
photpaths = glob.glob(photfileglob)
print('%sZ: called dump lightcurves for %d photfiles' %
(datetime.utcnow().isoformat(), len(photpaths)))
if len(photpaths)==0:
print('ERR! %sZ: failed to find %s, continuing' %
              (datetime.utcnow().isoformat(), photfileglob))
return 0
# *.iphot files need to have timestamp and filenames prepended on each
# line, otherwise lightcurves will have no times. make the correctly
# formatted ".iphottemp" files here.
for ix, photpath in enumerate(photpaths):
if observatory=='tess':
framekey = re.findall(
'tess20.*?-[0-9][0-9][0-9][0-9]_cal_img_bkgdsub', photpath)
if not len(framekey) == 1:
raise AssertionError(
'expected only one photframe, got {:s}'.
format(repr(framekey)))
originalframe = os.path.join(os.path.dirname(photpath),
framekey[0]+'.fits')
elif observatory=='hatpi':
framekey = re.findall('(.-.{7}_.)\.iphot', photpath)
assert len(framekey) == 1, 'HATPI specific regex!'
originalframe = os.path.join(os.path.dirname(photpath),
framekey[0]+'.fits')
else:
raise NotImplementedError
# check these files exist, and populate the dict if they do
if os.path.exists(originalframe):
tempphotpath = photpath.replace('.iphot','.iphottemp')
if os.path.exists(tempphotpath):
print('%sZ: skipping %d/%d frame, found .iphottemp file %s' %
(datetime.utcnow().isoformat(), ix, len(photpaths),
tempphotpath))
continue
print('%sZ: writing %d/%d frame, %s to .iphottemp file %s' %
(datetime.utcnow().isoformat(), ix, len(photpaths), photpath,
tempphotpath))
# this is the ORIGINAL FITS frame, since the subtracted one
# contains some weird JD header (probably inherited from the
# photref frame)
if observatory=='tess':
# observation start and stop time as BJD_UTC calendar dates.
# calculated by SPOC. (these include the leapseconds). the
# wrong barycentric correction has been applied to all
# available timestamps (and the sign convention for how the
# correction is applied -- is it added, or subtracted? -- has
# been confirmed via private comm. with Jon Jenkins)
tstart_bjd_utc_str = get_header_keyword(
originalframe, 'DATE-OBS')
tstop_bjd_utc_str = get_header_keyword(
originalframe, 'DATE-END')
ltt_barycenter_spoc = get_header_keyword(
originalframe, 'BARYCORR')
# record the midtime in JD_UTC with grcollect. barycentric
# correction comes later. (once ra/dec are accessible).
tstart_bjd_utc = Time(tstart_bjd_utc_str, format='isot',
scale='utc')
tstop_bjd_utc = Time(tstop_bjd_utc_str, format='isot',
scale='utc')
tmid_bjd_utc = (
tstart_bjd_utc.jd + (tstop_bjd_utc.jd - tstart_bjd_utc.jd)/2.
)
# WANT: bjd_tdb_me = jd_utc_spoc + ltt_barycenter_me + leapseconds (eq 1)
# HAVE: bjd_utc_spoc = jd_utc_spoc + ltt_barycenter_spoc (eq 2)
# use eq (2) to get jd_tdb_spoc:
tmid_jd_utc = tmid_bjd_utc - ltt_barycenter_spoc
frametime = tmid_jd_utc
elif observatory=='hatpi':
frametime = get_header_keyword(originalframe, 'JD')
else:
raise NotImplementedError
# now open the phot file, read it, and write to the tempphot file.
output = StringIO()
with open(photpath, 'rb') as photfile:
# write time precise to 1e-7 days = 8.6 milliseconds.
for line in photfile:
output.write('%.7f\t%s\t%s' % (
frametime, framekey[0], line.decode('utf-8'))
)
with open(tempphotpath, 'w') as tempphotfile:
output.seek(0)
shutil.copyfileobj(output, tempphotfile)
output.close()
grcollectglob = photfileglob.replace('.iphot', '.iphottemp')
cmdtorun = GRCOLLECTCMD.format(fiphotfileglob=grcollectglob,
lcdir=lcdir,
objectidcol=objectidcol,
lcextension=lcextension,
maxmemory=maxmemory)
if DEBUG:
print(cmdtorun)
# execute the grcollect shell command. NB we execute through a shell
# interpreter to get correct wildcards applied.
grcollectproc = subprocess.Popen(cmdtorun, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = grcollectproc.communicate()
if grcollectproc.returncode == 0:
print('%sZ: grcollect dump succeeded' % datetime.utcnow().isoformat())
else:
print('ERR! %sZ: grcollect failed' % datetime.utcnow().isoformat())
print('error was %s' % stderr )
print('%sZ: done, time taken: %.2f minutes' %
(datetime.utcnow().isoformat(), (time.time() - starttime)/60.0))
|
ba3dd1d1334964752596d543a629dfcfe9fa8b93
| 3,648,634
|
import re
def varPostV(self,name,value):
""" Moving all the data from entry to treeview """
regex = re.search("-[@_!#$%^&*()<>?/\|}{~: ]", name) #Prevent user from giving special character and space character
print(regex)
if not regex == None:
tk.messagebox.showerror("Forbidden Entry","The variable name for vehicle must not contain special character or space character")
return None
if not name.strip():
tk.messagebox.showerror("Empty entry","The variable name for vehicle is empty")
return None
if not value.strip():
tk.messagebox.showerror("Empty entry","The variable value for vechicle is empty")
return None
if not value.isdigit():
tk.messagebox.showerror("Empty entry","The variable value for vechicle must be number")
return None
self.varVContent = self.varDispV
self.varVContent.insert("",index="end",text=name,value=float(value))
|
07e108df0ff057ee42e39155562b32fa651ba625
| 3,648,635
|
def _mysql_int_length(subtype):
"""Determine smallest field that can hold data with given length."""
try:
length = int(subtype)
except ValueError:
raise ValueError(
'Invalid subtype for Integer column: {}'.format(subtype)
)
if length < 3:
kind = 'TINYINT'
elif length < 4:
kind = 'SMALLINT'
elif length < 7:
kind = 'MEDIUMINT'
elif length <= 10:
kind = 'INT'
else:
kind = 'BIGINT'
return '{}({})'.format(kind, length)
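# Worked examples of the length-to-type mapping above:
assert _mysql_int_length('2') == 'TINYINT(2)'
assert _mysql_int_length('5') == 'MEDIUMINT(5)'
assert _mysql_int_length('10') == 'INT(10)'
assert _mysql_int_length('12') == 'BIGINT(12)'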
|
3a0e84a3ac602bb018ae7056f4ad06fe0dcab53b
| 3,648,636
|
import numpy as np
from scipy import stats
def get_ci(vals, percent=0.95):
    """Confidence interval for `vals` from the Students' t
    distribution. Uses `stats.t.interval`.
    Parameters
    ----------
    vals : list-like
        The sample values.
    percent : float
        Size of the confidence interval. The default is 0.95. The only
        requirement is that this be above 0 and at or below 1.
    Returns
    -------
    tuple
        The first member is the lower end of the interval, the second
        the upper end.
    """
if len(set(vals)) == 1:
return (vals[0], vals[0])
mu = np.mean(vals)
df = len(vals)-1
sigma = np.std(vals) / np.sqrt(len(vals))
return stats.t.interval(percent, df, loc=mu, scale=sigma)
|
1d5e311aab4620e070efcf685505af4a072e56eb
| 3,648,637
|
from collections import OrderedDict
from datetime import datetime
def kb_overview_rows(mode=None, max=None, locale=None, product=None, category=None):
"""Return the iterable of dicts needed to draw the new KB dashboard
overview"""
if mode is None:
mode = LAST_30_DAYS
docs = Document.objects.filter(locale=settings.WIKI_DEFAULT_LANGUAGE,
is_archived=False,
is_template=False)
docs = docs.exclude(html__startswith=REDIRECT_HTML)
select = OrderedDict([
('num_visits', 'SELECT `wdv`.`visits` '
'FROM `dashboards_wikidocumentvisits` as `wdv` '
'WHERE `wdv`.`period`=%s '
'AND `wdv`.`document_id`=`wiki_document`.`id`'),
])
docs = docs.extra(select=select,
select_params=(mode,))
if product:
docs = docs.filter(products__in=[product])
if category:
docs = docs.filter(category__in=[category])
docs = docs.order_by('-num_visits', 'title')
if max:
docs = docs[:max]
rows = []
if docs.count():
max_visits = docs[0].num_visits
for d in docs:
data = {
'url': reverse('wiki.document', args=[d.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE),
'trans_url': reverse('wiki.show_translations', args=[d.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE),
'title': d.title,
'num_visits': d.num_visits,
'ready_for_l10n': d.revisions.filter(is_approved=True,
is_ready_for_localization=True).exists()
}
if d.current_revision:
data['expiry_date'] = d.current_revision.expires
if d.num_visits:
data['visits_ratio'] = float(d.num_visits) / max_visits
if 'expiry_date' in data and data['expiry_date']:
data['stale'] = data['expiry_date'] < datetime.now()
# Check L10N status
if d.current_revision:
unapproved_revs = d.revisions.filter(
reviewed=None, id__gt=d.current_revision.id)[:1]
else:
unapproved_revs = d.revisions.all()
if unapproved_revs.count():
data['revision_comment'] = unapproved_revs[0].comment
else:
data['latest_revision'] = True
# Get the translated doc
if locale != settings.WIKI_DEFAULT_LANGUAGE:
transdoc = d.translations.filter(
locale=locale,
is_archived=False).first()
if transdoc:
data['needs_update'] = transdoc.is_outdated()
else: # For en-US we show the needs_changes comment.
data['needs_update'] = d.needs_change
data['needs_update_comment'] = d.needs_change_comment
rows.append(data)
return rows
|
768d9aacf564e17b26beb53f924575202fea3276
| 3,648,638
|
def test_query_devicecontrolalert_facets(monkeypatch):
"""Test a Device Control alert facet query."""
_was_called = False
def _run_facet_query(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/alerts/devicecontrol/_facet"
assert body == {"query": "Blort", "criteria": {"workflow": ["OPEN"]},
"terms": {"rows": 0, "fields": ["REPUTATION", "STATUS"]},
"rows": 100}
_was_called = True
return StubResponse({"results": [{"field": {},
"values": [{"id": "reputation", "name": "reputationX", "total": 4}]},
{"field": {},
"values": [{"id": "status", "name": "statusX", "total": 9}]}]})
api = CBCloudAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbc_sdk_api(monkeypatch, api, POST=_run_facet_query)
query = api.select(DeviceControlAlert).where("Blort").set_workflows(["OPEN"])
f = query.facets(["REPUTATION", "STATUS"])
assert _was_called
assert f == [{"field": {}, "values": [{"id": "reputation", "name": "reputationX", "total": 4}]},
{"field": {}, "values": [{"id": "status", "name": "statusX", "total": 9}]}]
|
6a0182a7b9ad15e5194bcc80aba19ab386e71f35
| 3,648,639
|
from typing import List
from typing import Optional
from typing import Set
import math
from collections import defaultdict
from itertools import cycle
import numpy as np
import plotly.graph_objects as go
from natsort import humansorted
from plotly.graph_objects import Figure
from plotly.subplots import make_subplots
from scipy.ndimage import gaussian_filter1d
def plot_logs(experiments: List[Summary],
smooth_factor: float = 0,
ignore_metrics: Optional[Set[str]] = None,
pretty_names: bool = False,
include_metrics: Optional[Set[str]] = None) -> Figure:
"""A function which will plot experiment histories for comparison viewing / analysis.
Args:
experiments: Experiment(s) to plot.
smooth_factor: A non-negative float representing the magnitude of gaussian smoothing to apply (zero for none).
pretty_names: Whether to modify the metric names in graph titles (True) or leave them alone (False).
ignore_metrics: Any keys to ignore during plotting.
include_metrics: A whitelist of keys to include during plotting. If None then all will be included.
Returns:
The handle of the pyplot figure.
"""
# Sort to keep same colors between multiple runs of visualization
experiments = humansorted(to_list(experiments), lambda exp: exp.name)
n_experiments = len(experiments)
if n_experiments == 0:
return make_subplots()
ignore_keys = ignore_metrics or set()
ignore_keys = to_set(ignore_keys)
ignore_keys |= {'epoch'}
include_keys = to_set(include_metrics)
# TODO: epoch should be indicated on the axis (top x axis?). Problem - different epochs per experiment.
# TODO: figure out how ignore_metrics should interact with mode
# TODO: when ds_id switches during training, prevent old id from connecting with new one (break every epoch?)
ds_ids = set()
metric_histories = defaultdict(_MetricGroup) # metric: MetricGroup
for idx, experiment in enumerate(experiments):
history = experiment.history
# Since python dicts remember insertion order, sort the history so that train mode is always plotted on bottom
for mode, metrics in sorted(history.items(),
key=lambda x: 0 if x[0] == 'train' else 1 if x[0] == 'eval' else 2 if x[0] == 'test'
else 3 if x[0] == 'infer' else 4):
for metric, step_val in metrics.items():
base_metric, ds_id, *_ = f'{metric}|'.split('|') # Plot acc|ds1 and acc|ds2 on same acc graph
if len(step_val) == 0:
continue # Ignore empty metrics
if metric in ignore_keys or base_metric in ignore_keys:
continue
# Here we intentionally check against metric and not base_metric. If user wants to display per-ds they
# can specify that in their include list: --include mcc 'mcc|usps'
if include_keys and metric not in include_keys:
continue
metric_histories[base_metric].add(idx, mode, ds_id, step_val)
ds_ids.add(ds_id)
metric_list = list(sorted(metric_histories.keys()))
if len(metric_list) == 0:
return make_subplots()
ds_ids = humansorted(ds_ids) # Sort them to have consistent ordering (and thus symbols) between plot runs
n_plots = len(metric_list)
if len(ds_ids) > 9: # 9 b/c None is included
print("FastEstimator-Warn: Plotting more than 8 different datasets isn't well supported. Symbols will be "
"reused.")
# Non-Shared legends aren't supported yet. If they get supported then maybe can have that feature here too.
# https://github.com/plotly/plotly.js/issues/5099
# https://github.com/plotly/plotly.js/issues/5098
# map the metrics into an n x n grid, then remove any extra columns. Final grid will be n x m with m <= n
n_rows = math.ceil(math.sqrt(n_plots))
n_cols = math.ceil(n_plots / n_rows)
metric_grid_location = {}
nd1_metrics = []
idx = 0
for metric in metric_list:
if metric_histories[metric].ndim() == 1:
# Delay placement of the 1D plots until the end
nd1_metrics.append(metric)
else:
metric_grid_location[metric] = (idx // n_cols, idx % n_cols)
idx += 1
for metric in nd1_metrics:
metric_grid_location[metric] = (idx // n_cols, idx % n_cols)
idx += 1
titles = [k for k, v in sorted(list(metric_grid_location.items()), key=lambda e: e[1][0] * n_cols + e[1][1])]
if pretty_names:
titles = [prettify_metric_name(title) for title in titles]
fig = make_subplots(rows=n_rows, cols=n_cols, subplot_titles=titles, shared_xaxes='all')
fig.update_layout({'plot_bgcolor': '#FFF',
'hovermode': 'closest',
'margin': {'t': 50},
'modebar': {'add': ['hoverclosest', 'hovercompare'],
'remove': ['select2d', 'lasso2d']},
'legend': {'tracegroupgap': 5,
'font': {'size': 11}}})
# Set x-labels
for idx, metric in enumerate(titles, start=1):
plotly_idx = idx if idx > 1 else ""
x_axis_name = f'xaxis{plotly_idx}'
y_axis_name = f'yaxis{plotly_idx}'
if metric_histories[metric].ndim() > 1:
fig['layout'][x_axis_name]['title'] = 'Steps'
fig['layout'][x_axis_name]['showticklabels'] = True
fig['layout'][x_axis_name]['linecolor'] = "#BCCCDC"
fig['layout'][y_axis_name]['linecolor'] = "#BCCCDC"
else:
# Put blank data onto the axis to instantiate the domain
row, col = metric_grid_location[metric][0], metric_grid_location[metric][1]
fig.add_annotation(text='', showarrow=False, row=row + 1, col=col + 1)
# Hide the axis stuff
fig['layout'][x_axis_name]['showgrid'] = False
fig['layout'][x_axis_name]['zeroline'] = False
fig['layout'][x_axis_name]['visible'] = False
fig['layout'][y_axis_name]['showgrid'] = False
fig['layout'][y_axis_name]['zeroline'] = False
fig['layout'][y_axis_name]['visible'] = False
# If there is only 1 experiment, we will use alternate colors based on mode
color_offset = defaultdict(lambda: 0)
n_colors = n_experiments
if n_experiments == 1:
n_colors = 4
color_offset['eval'] = 1
color_offset['test'] = 2
color_offset['infer'] = 3
colors = get_colors(n_colors=n_colors)
alpha_colors = get_colors(n_colors=n_colors, alpha=0.3)
# exp_id : {mode: {ds_id: {type: True}}}
add_label = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: True))))
# {row: {col: (x, y)}}
ax_text = defaultdict(lambda: defaultdict(lambda: (0.0, 0.9))) # Where to put the text on a given axis
# Set up ds_id markers. The empty ds_id will have no extra marker. After that there are 4 configurations of 3-arm
# marker, followed by 'x', '+', '*', and pound. After that it will just repeat the symbol set.
ds_id_markers = [None, 37, 38, 39, 40, 34, 33, 35, 36] # https://plotly.com/python/marker-style/
ds_id_markers = {k: v for k, v in zip(ds_ids, cycle(ds_id_markers))}
# Plotly doesn't support z-order, so delay insertion until all the plots are figured out:
# https://github.com/plotly/plotly.py/issues/2345
z_order = defaultdict(list) # {order: [(plotly element, row, col), ...]}
# Figure out the legend ordering
legend_order = []
for exp_idx, experiment in enumerate(experiments):
for metric, group in metric_histories.items():
for mode in group.modes(exp_idx):
for ds_id in group.ds_ids(exp_idx, mode):
ds_title = f"{ds_id} " if ds_id else ''
title = f"{experiment.name} ({ds_title}{mode})" if n_experiments > 1 else f"{ds_title}{mode}"
legend_order.append(title)
legend_order.sort()
legend_order = {legend: order for order, legend in enumerate(legend_order)}
# Actually do the plotting
for exp_idx, experiment in enumerate(experiments):
for metric, group in metric_histories.items():
row, col = metric_grid_location[metric][0], metric_grid_location[metric][1]
if group.ndim() == 1:
# Single value
for mode in group.modes(exp_idx):
for ds_id in group.ds_ids(exp_idx, mode):
ds_title = f"{ds_id} " if ds_id else ''
prefix = f"{experiment.name} ({ds_title}{mode})" if n_experiments > 1 else f"{ds_title}{mode}"
plotly_idx = row * n_cols + col + 1 if row * n_cols + col + 1 > 1 else ''
fig.add_annotation(text=f"{prefix}: {group.get_val(exp_idx, mode, ds_id)}",
font={'color': colors[exp_idx + color_offset[mode]]},
showarrow=False,
xref=f'x{plotly_idx} domain',
xanchor='left',
x=ax_text[row][col][0],
yref=f'y{plotly_idx} domain',
yanchor='top',
y=ax_text[row][col][1],
exclude_empty_subplots=False)
ax_text[row][col] = (ax_text[row][col][0], ax_text[row][col][1] - 0.1)
if ax_text[row][col][1] < 0:
ax_text[row][col] = (ax_text[row][col][0] + 0.5, 0.9)
elif group.ndim() == 2:
for mode, dsv in group[exp_idx].items():
color = colors[exp_idx + color_offset[mode]]
for ds_id, data in dsv.items():
ds_title = f"{ds_id} " if ds_id else ''
title = f"{experiment.name} ({ds_title}{mode})" if n_experiments > 1 else f"{ds_title}{mode}"
if data.shape[0] < 2:
x = data[0][0]
y = data[0][1]
y_min = None
y_max = None
if isinstance(y, ValWithError):
y_min = y.y_min
y_max = y.y_max
y = y.y
marker_style = 'circle' if mode == 'train' else 'diamond' if mode == 'eval' \
else 'square' if mode == 'test' else 'hexagram'
limit_data = [(y_max, y_min)] if y_max is not None and y_min is not None else None
tip_text = "%{x}: (%{customdata[1]:.3f}, %{y:.3f}, %{customdata[0]:.3f})" if \
limit_data is not None else "%{x}: %{y:.3f}"
error_y = None if limit_data is None else {'type': 'data',
'symmetric': False,
'array': [y_max - y],
'arrayminus': [y - y_min]}
z_order[2].append((go.Scatter(x=[x],
y=[y],
name=title,
legendgroup=title,
customdata=limit_data,
hovertemplate=tip_text,
mode='markers',
marker={'color': color,
'size': 12,
'symbol': _symbol_mash(marker_style,
ds_id_markers[ds_id]),
'line': {'width': 1.5,
'color': 'White'}},
error_y=error_y,
showlegend=add_label[exp_idx][mode][ds_id]['patch'],
legendrank=legend_order[title]),
row,
col))
add_label[exp_idx][mode][ds_id]['patch'] = False
else:
# We can draw a line
y = data[:, 1]
y_min = None
y_max = None
if isinstance(y[0], ValWithError):
y = np.stack(y)
y_min = y[:, 0]
y_max = y[:, 2]
y = y[:, 1]
if smooth_factor != 0:
y_min = gaussian_filter1d(y_min, sigma=smooth_factor)
y_max = gaussian_filter1d(y_max, sigma=smooth_factor)
# TODO - for smoothed lines, plot original data in background but greyed out
if smooth_factor != 0:
y = gaussian_filter1d(y, sigma=smooth_factor)
x = data[:, 0]
linestyle = 'solid' if mode == 'train' else 'dash' if mode == 'eval' else 'dot' if \
mode == 'test' else 'dashdot'
limit_data = [(mx, mn) for mx, mn in zip(y_max, y_min)] if y_max is not None and y_min is \
not None else None
tip_text = "%{x}: (%{customdata[1]:.3f}, %{y:.3f}, %{customdata[0]:.3f})" if \
limit_data is not None else "%{x}: %{y:.3f}"
z_order[1].append((go.Scatter(x=x,
y=y,
name=title,
legendgroup=title,
mode="lines+markers" if ds_id_markers[ds_id] else 'lines',
marker={'color': color,
'size': 8,
'line': {'width': 2,
'color': 'DarkSlateGrey'},
'maxdisplayed': 10,
'symbol': ds_id_markers[ds_id]},
line={'dash': linestyle,
'color': color},
customdata=limit_data,
hovertemplate=tip_text,
showlegend=add_label[exp_idx][mode][ds_id]['line'],
legendrank=legend_order[title]),
row,
col))
add_label[exp_idx][mode][ds_id]['line'] = False
if limit_data is not None:
z_order[0].append((go.Scatter(x=x,
y=y_max,
mode='lines',
line={'width': 0},
legendgroup=title,
showlegend=False,
hoverinfo='skip'),
row,
col))
z_order[0].append((go.Scatter(x=x,
y=y_min,
mode='lines',
line={'width': 0},
fillcolor=alpha_colors[exp_idx + color_offset[mode]],
fill='tonexty',
legendgroup=title,
showlegend=False,
hoverinfo='skip'),
row,
col))
else:
# Some kind of image or matrix. Not implemented yet.
pass
for z in sorted(list(z_order.keys())):
plts = z_order[z]
for plt, row, col in plts:
fig.add_trace(plt, row=row + 1, col=col + 1)
# If inside a jupyter notebook then force the height based on number of rows
if in_notebook():
fig.update_layout(height=280 * n_rows)
return fig
|
742ba64568f89e679ff3ec8dbb60894130f75a0d
| 3,648,640
|
def regularity(sequence):
"""
Compute the regularity of a sequence.
The regularity basically measures what percentage of a user's
visits are to a previously visited place.
Parameters
----------
sequence : list
A list of symbols.
Returns
-------
float
1 minus the ratio between unique and total symbols in the sequence.
"""
n = len(sequence)
n_unique = len(set(sequence))
if n_unique <= 1:
return 1.0
if n_unique == n:
return .0
return 1 - (n_unique / n)
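# Worked example: 4 visits to 3 distinct places -> 1 - 3/4 = 0.25
assert regularity(['home', 'work', 'home', 'cafe']) == 0.25
assert regularity(['home', 'home']) == 1.0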
|
e03d38cc3882ea5d0828b1f8942039865a90d49d
| 3,648,641
|
from typing import List
def _make_tick_labels(
tick_values: List[float], axis_subtractor: float, tick_divisor_power: int,
) -> List[str]:
"""Given a collection of ticks, return a formatted version.
Args:
tick_values (List[float]): The ticks positions in ascending
order.
tick_divisor_power (int): The power of ten the tick labels will
be divided by.
axis_subtractor (float): The amount to subtract from the tick
values.
Returns:
        List[str]: The generated tick labels.
"""
tick_divisor_prefix = _get_metric_prefix(tick_divisor_power)
return [
f"{(tick - axis_subtractor) / 10 ** tick_divisor_power:0.2f}"
f"{tick_divisor_prefix}"
for tick in tick_values
]
|
13aaddc38d5fdc05d38a97c5ca7278e9898a8ed1
| 3,648,642
|
import pickle
import h5py
def load_coco(dataset_file, map_file):
"""
Load preprocessed MSCOCO 2017 dataset
"""
print('\nLoading dataset...')
h5f = h5py.File(dataset_file, 'r')
x = h5f['x'][:]
y = h5f['y'][:]
h5f.close()
split = int(x.shape[0] * 0.8) # 80% of data is assigned to the training set
x_train, y_train = x[:split], y[:split]
x_test, y_test = x[split:], y[split:]
with open(map_file, 'rb') as mapping:
category_id_map = pickle.load(mapping)
id_category = category_id_map['id_category']
print('Done.')
return (x_train, y_train), (x_test, y_test), id_category
|
b924b13411075f569b8cd73ee6d5414a4f932a17
| 3,648,643
|
import numpy as np
def eval_rule(call_fn, abstract_eval_fn, *args, **kwargs):
"""
Python Evaluation rule for a numba4jax function respecting the
XLA CustomCall interface.
Evaluates `outs = abstract_eval_fn(*args)` to compute the output shape
and preallocate them, then executes `call_fn(*outs, *args)` which is
the Numba kernel.
Args:
call_fn: a (numba.jit) function respecting the calling convention of
XLA CustomCall, taking first the outputs by reference then the
inputs.
abstract_eval_fn: The abstract evaluation function respecting jax
interface
args: The arguments to the `call_fn`
kwargs: Optional keyword arguments for the numba function.
"""
# compute the output shapes
output_shapes = abstract_eval_fn(*args)
# Preallocate the outputs
outputs = tuple(np.empty(shape.shape, dtype=shape.dtype) for shape in output_shapes)
# convert inputs to a tuple
inputs = tuple(np.asarray(arg) for arg in args)
# call the kernel
call_fn(outputs + inputs, **kwargs)
# Return the outputs
return tuple(outputs)
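# A minimal illustrative pairing of call_fn / abstract_eval_fn (names and kernel are
# hypothetical; a real call_fn would typically be a numba.jit-compiled CustomCall kernel):
def _double_abstract_eval(x):
    # output has the same shape and dtype as the input
    return (x,)

def _double_kernel(args):
    out, x = args  # outputs first, then inputs, per the CustomCall convention
    out[:] = 2 * x

y, = eval_rule(_double_kernel, _double_abstract_eval, np.ones(3))  # y == [2., 2., 2.]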
|
6e52d9bdeda86c307390b95237589bd9315829ad
| 3,648,644
|
def bev_box_overlap(boxes, qboxes, criterion=-1):
"""
Calculate rotated 2D iou.
Args:
boxes:
qboxes:
criterion:
Returns:
"""
riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
return riou
|
4614a3f8c8838fc4f9f32c2ef625274239f36acf
| 3,648,645
|
def filter_shape(image):
"""画像にぼかしフィルターを適用。"""
weight = (
(1, 1, 1),
(1, 1, 1),
(1, 1, 1)
)
offset = 0
div = 9
return _filter(image, weight, offset, div)
|
f16c181c0eb24a35dfc70df25f1977a04dc9b0f5
| 3,648,646
|
import sys
from functools import partial
from inspect import isroutine
from numbers import Number
def ppretty(obj, indent=' ', depth=4, width=72, seq_length=5,
show_protected=False, show_private=False, show_static=False, show_properties=False, show_address=False,
str_length=50):
"""Represents any python object in a human readable format.
:param obj: An object to represent.
:type obj: object
:param indent: Fill string for indents. Default is ' '.
:type indent: str
:param depth: Depth of introspecion. Default is 4.
:type depth: int
:param width: Width of line in output string. It may be exceeded when representation doesn't fit. Default is 72.
:type width: int
:param seq_length: Maximum sequence length. Also, used for object's members enumeration. Default is 5.
:type seq_length: int
:param show_protected: Examine protected members. Default is False.
:type show_protected: bool
:param show_private: Examine private members. To take effect show_protected must be set to True. Default is False.
:type show_private: bool
:param show_static: Examine static members. Default is False.
:type show_static: bool
:param show_properties: Examine properties members. Default is False.
:type show_properties: bool
:param show_address: Show address. Default is False.
:type show_address: bool
:param str_length: Maximum string length. Default is 50.
:type str_length: int
:return: The final representation of the object.
:rtype: str
"""
seq_brackets = {list: ('[', ']'), tuple: ('(', ')'), set: ('set([', '])'), dict: ('{', '}')}
seq_types = tuple(seq_brackets.keys())
basestring_type = basestring if sys.version_info[0] < 3 else str
def inspect_object(current_obj, current_depth, current_width, seq_type_descendant=False):
inspect_nested_object = partial(inspect_object,
current_depth=current_depth - 1,
current_width=current_width - len(indent))
# Basic types
if isinstance(current_obj, Number):
return [repr(current_obj)]
# Strings
if isinstance(current_obj, basestring_type):
if len(current_obj) <= str_length:
return [repr(current_obj)]
return [repr(current_obj[:int(str_length / 2)] + '...' + current_obj[int((1 - str_length) / 2):])]
# Class object
if isinstance(current_obj, type):
module = current_obj.__module__ + '.' if hasattr(current_obj, '__module__') else ''
return ["<class '" + module + current_obj.__name__ + "'>"]
# None
if current_obj is None:
return ['None']
# Format block of lines
def format_block(lines, open_bkt='', close_bkt=''):
new_lines = [] # new_lines will be returned if width exceeded
one_line = '' # otherwise, one_line will be returned.
if open_bkt:
new_lines.append(open_bkt)
one_line += open_bkt
for line in lines:
new_lines.append(indent + line)
if len(one_line) <= current_width:
one_line += line
if close_bkt:
if lines:
new_lines.append(close_bkt)
else:
new_lines[-1] += close_bkt
one_line += close_bkt
return [one_line] if len(one_line) <= current_width and one_line else new_lines
class SkipElement(object):
pass
class ErrorAttr(object):
def __init__(self, e):
self.e = e
def cut_seq(seq):
if current_depth < 1:
return [SkipElement()]
if len(seq) <= seq_length:
return seq
elif seq_length > 1:
seq = list(seq) if isinstance(seq, tuple) else seq
return seq[:int(seq_length / 2)] + [SkipElement()] + seq[int((1 - seq_length) / 2):]
return [SkipElement()]
def format_seq(extra_lines):
r = []
items = cut_seq(obj_items)
for n, i in enumerate(items, 1):
if type(i) is SkipElement:
r.append('...')
else:
if type(current_obj) is dict or seq_type_descendant and isinstance(current_obj, dict):
(k, v) = i
k = inspect_nested_object(k)
v = inspect_nested_object(v)
k[-1] += ': ' + v.pop(0)
r.extend(k)
r.extend(format_block(v))
elif type(current_obj) in seq_types or seq_type_descendant and isinstance(current_obj, seq_types):
r.extend(inspect_nested_object(i))
else:
(k, v) = i
k = [k]
if type(v) is ErrorAttr:
e_message = '<Attribute error: ' + type(v.e).__name__
if hasattr(v.e, 'message'):
e_message += ': ' + v.e.message
e_message += '>'
v = [e_message]
else:
v = inspect_nested_object(v)
k[-1] += ' = ' + v.pop(0)
r.extend(k)
r.extend(format_block(v))
if n < len(items) or extra_lines:
r[-1] += ', '
return format_block(r + extra_lines, *brackets)
# Sequence types
# Others objects are considered as sequence of members
extra_lines = []
if type(current_obj) in seq_types or seq_type_descendant and isinstance(current_obj, seq_types):
if isinstance(current_obj, dict):
obj_items = list(current_obj.items())
else:
obj_items = current_obj
if seq_type_descendant:
brackets = seq_brackets[[seq_type for seq_type in seq_types if isinstance(current_obj, seq_type)].pop()]
else:
brackets = seq_brackets[type(current_obj)]
else:
obj_items = []
for k in sorted(dir(current_obj)):
if not show_private and k.startswith('_') and '__' in k:
continue
if not show_protected and k.startswith('_'):
continue
try:
v = getattr(current_obj, k)
if isroutine(v):
continue
if not show_static and hasattr(type(current_obj), k) and v is getattr(type(current_obj), k):
continue
if not show_properties and hasattr(type(current_obj), k) and isinstance(
getattr(type(current_obj), k), property):
continue
except Exception as e:
v = ErrorAttr(e)
obj_items.append((k, v))
if isinstance(current_obj, seq_types):
# If object's class was inherited from one of basic sequence types
extra_lines += inspect_nested_object(current_obj, seq_type_descendant=True)
module = current_obj.__module__ + '.' if hasattr(current_obj, '__module__') else ''
address = ' at ' + hex(id(current_obj)) + ' ' if show_address else ''
brackets = (module + type(current_obj).__name__ + address + '(', ')')
return format_seq(extra_lines)
return '\n'.join(inspect_object(obj, depth, width))
|
90781dd00d5d0eca25e82dfb0d33f3240ccc8ab9
| 3,648,647
|
def makeTriangularMAFdist(low=0.02, high=0.5, beta=5):
"""Fake a non-uniform maf distribution to make the data
more interesting - more rare alleles """
MAFdistribution = []
    for i in range(int(100*low),int(100*high)+1):
freq = (51 - i)/100.0 # large numbers of small allele freqs
for j in range(beta*i): # or i*i for crude exponential distribution
MAFdistribution.append(freq)
return MAFdistribution
|
4210fffbe411364b2de8ffbc1e6487c8d3a87a09
| 3,648,648
|
def create_structures_hdf5_stitched_ref_gene_file_npy(stitching_file, joining, nr_pixels,
reference_gene, blend = 'non linear'):
"""Takes an HDF5 file handle and creates the necessary structures.
Modification of create_structures_hdf5_files to work with .npy list of
files
Creates groups and data sets, when the groups or data sets already
exists, they are kept as they are, as long as the data sets have
the right size and data type. Incompatible data sets will be
overwritten.
Stitching file has the following structure:
__________________Groups____________|___Data sets____
gene_stitched:
StitchedImage:
final_image
blending_mask
Parameters:
-----------
stitching_file: pointer
HDF5 file handle. The file where the stitched images will be saved.
joining: dict
Dictionary containing keys 'corner_list and 'final_image_shape'.
Corner_list is a list of list, each list is a pair of an image number
(int) and it's coordinates (numpy array containing floats).
Final_image_shape is a tuple of size 2 or 3
depending on the number of dimensions and contains ints.
nr_pixels: int
Height and length of the tile in pixels, tile is assumed to be square.
reference_gene: str
The name of the gene we are stitching.This will be used to place the
data in the right group in stitching_file.
blend: str
        When 'non linear' or 'linear', blending will be applied, so the
        structures necessary for saving the blended tiles are created.
        When it has another value or is None, no blending at all will be
        applied, so this step can be skipped. This variable also determines
        the return value of linear_blending.
Returns:
--------
stitched_group: pointer
HDF5 reference to the group where the final will be.
linear_blending: bool
When True later blending should be linear and when False, blending
should be non-linear.
blend: str
When 'non linear' or 'linear', blending should be applied. When
it has another value or is None no blending at
all will be applied.
"""
logger.info("Generating stitching file structures.")
# Create a group for the stitched images in the stitching file
stitching_file.require_group(reference_gene)
stitched_group = stitching_file[reference_gene].require_group('StitchedImage')
# Create the final image in this file
try:
final_image = stitched_group.require_dataset('final_image',
joining['final_image_shape'],
dtype = np.float64)
except TypeError as err:
logger.info("Incompatible 'final_image' data set already existed, deleting old dataset.\n {}"
.format(err))
del stitched_group['final_image']
inout.free_hdf5_space(stitching_file)
final_image = stitched_group.require_dataset('final_image',
joining['final_image_shape'],
dtype = np.float64)
# If blending is required initialize the blending mask in the
# hdf5 file
if blend is not None:
# For the blending masks use only the last 2 dimensions of final
# image shape, because also when working in 3D the masks can be
# 2D as there is the same shift in x and y direction for the
# whole stack.
try:
blending_mask = stitched_group.require_dataset('blending_mask',
joining['final_image_shape'][-2:],
dtype = np.float64)
except TypeError as err:
logger.info("Incompatible 'blending_mask' data set already existed, deleting old dataset.\n {}"
.format(err))
del stitched_group['blending_mask']
inout.free_hdf5_space(stitching_file)
            # Recreate the blending mask (not the final image) after deleting
            # the incompatible data set.
            blending_mask = stitched_group.require_dataset('blending_mask',
                                        joining['final_image_shape'][-2:],
                                        dtype = np.float64)
# Check type of blending
if blend == 'non linear':
linear_blending = False
elif blend == 'linear':
linear_blending = True
else:
linear_blending = False
logger.warning("Blend not defined correctly, \
using non-linear blending, \
blend is: {}".format(blend))
if False:
logger.info("Flushing hdf5 file to clean up after delete operations")
before_flush = stitching_file.id.get_filesize()
stitching_file.flush()
after_flush = stitching_file.id.get_filesize()
logger.debug("Size in bytes before flush: {} after flush: {} space freed: {}".format(before_flush, after_flush, before_flush - after_flush))
return stitched_group, linear_blending, blend
|
2ddbc18f2f946e429ca5fcefd03a5108edbecd36
| 3,648,649
|
def contains_whitespace(s : str):
"""
    Returns True if the input string contains a space or tab character.
"""
return " " in s or "\t" in s
|
c5dc974988efcfa4fe0ec83d115dfa7508cef798
| 3,648,650
|
import itertools
def get_files_to_check(files, filter_function):
# type: (List[str], Callable[[str], bool]) -> List[str]
"""Get a list of files that need to be checked based on which files are managed by git."""
# Get a list of candidate_files
candidates_nested = [expand_file_string(f) for f in files]
candidates = list(itertools.chain.from_iterable(candidates_nested))
if len(files) > 0 and len(candidates) == 0:
raise ValueError("Globs '%s' did not find any files with glob." % (files))
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable(
[r.get_candidates(candidates, filter_function) for r in repos]))
if len(files) > 0 and len(valid_files) == 0:
raise ValueError("Globs '%s' did not find any files with glob in git." % (files))
return valid_files
|
6b508745aa47e51c4b8f4b88d7a2dff782289206
| 3,648,651
|
def main(argv):
"""Parse the argv, verify the args, and call the runner."""
args = arg_parse(argv)
return run(args.top_foods, args.top_food_categories)
|
bb2da95cec48b1abfa0e1c0064d413e55767aa89
| 3,648,652
|
import requests
def _failover_read_request(request_fn, endpoint, path, body, headers, params, timeout):
""" This function auto-retries read-only requests until they return a 2xx status code. """
try:
return request_fn('GET', endpoint, path, body, headers, params, timeout)
except (requests.exceptions.RequestException, Non200ResponseException) as ex:
raise FailoverException(ex)
|
731a14e72ff4fa88f160215f711fcca2199c736c
| 3,648,653
|
def GenerateConfig(context):
"""Generates configuration."""
image = ''.join(['https://www.googleapis.com/compute/v1/',
'projects/google-containers/global/images/',
context.properties['containerImage']])
default_network = ''.join(['https://www.googleapis.com/compute/v1/projects/',
context.env['project'],
'/global/networks/default'])
instance_template = {
'name': context.env['name'] + '-it',
'type': 'compute.v1.instanceTemplate',
'properties': {
'properties': {
'metadata': {
'items': [{
'key': 'google-container-manifest',
'value': GenerateManifest(context)
}]
},
'machineType': 'f1-micro',
'disks': [{
'deviceName': 'boot',
'boot': True,
'autoDelete': True,
'mode': 'READ_WRITE',
'type': 'PERSISTENT',
'initializeParams': {'sourceImage': image}
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network': default_network
}]
}
}
}
outputs = [{'name': 'instanceTemplateSelfLink',
'value': '$(ref.' + instance_template['name'] + '.selfLink)'}]
return {'resources': [instance_template], 'outputs': outputs}
|
4dbf579c780b0305b4a0b5dcfb3860a086867cbb
| 3,648,654
|
from typing import Optional
from typing import List
async def read_all_orders(
status_order: Optional[str] = None,
priority: Optional[int] = None,
age: Optional[str] = None,
value: Optional[str] = None,
start_date: Optional[str] = None,
end_date: Optional[str] = None,
db: AsyncIOMotorClient = Depends(get_database),
) -> List[OrderSchema]:
"""[summary]
Get all item by ID.
[description]
Endpoint to retrieve an specific item.
[optional]
[ON CREATE] Filter order by status: ['to-do', 'doing', 'done']
"""
filters = {
"status": status_order,
"priority": priority,
"age": age,
"value": value,
"start_date": start_date,
"end_date": end_date,
}
orders_list = await orders.get_all(db, filters)
if not orders_list:
raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Order not found"
)
return list(map(fix_item_id, orders_list))
|
28200a5dd9c508690ad3234c3080f0bb44a425c4
| 3,648,655
|
def read_log_file(path):
"""
Read the log file for 3D Match's log files
"""
with open(path, "r") as f:
log_lines = f.readlines()
log_lines = [line.strip() for line in log_lines]
num_logs = len(log_lines) // 5
transforms = []
    for i in range(0, num_logs * 5, 5):  # each log entry spans 5 lines
meta_data = np.fromstring(log_lines[i], dtype=int, sep=" \t")
transform = np.zeros((4, 4), dtype=float)
for j in range(4):
transform[j] = np.fromstring(log_lines[i + j + 1], dtype=float, sep=" \t")
transforms.append((meta_data, transform))
return transforms
|
dcd010f27c94d5bcd2287cb83765fe0eca291628
| 3,648,656
|
import math
def divide_list(l, n):
"""Divides list l into n successive chunks."""
length = len(l)
chunk_size = int(math.ceil(length/n))
expected_length = n * chunk_size
chunks = []
for i in range(0, expected_length, chunk_size):
chunks.append(l[i:i+chunk_size])
for i in range(len(chunks), n):
chunks.append([])
return chunks
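# Hedged usage sketch (not from the original snippet): split seven items into
# three chunks; the last chunk is shorter, and empty chunks pad the result to n.
print(divide_list([1, 2, 3, 4, 5, 6, 7], 3))  # [[1, 2, 3], [4, 5, 6], [7]]
print(divide_list([1, 2], 3))                 # [[1], [2], []]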
|
bad7c118988baebd5712cd496bb087cd8788abb7
| 3,648,657
|
def sigma(n):
"""Calculate the sum of all divisors of N."""
return sum(divisors(n))
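# Hedged usage sketch (not from the original snippet); `divisors` is assumed
# to return every positive divisor of n, e.g. this minimal version:
def divisors(n):
    return [d for d in range(1, n + 1) if n % d == 0]

print(sigma(12))  # 1 + 2 + 3 + 4 + 6 + 12 = 28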
|
13dd02c10744ce74b2a89bb4231c9c055eefa065
| 3,648,658
|
def ATOMPAIRSfpDataFrame(chempandas,namecol,smicol):
"""
AtomPairs-based fingerprints 2048 bits.
"""
assert chempandas.shape[0] <= MAXLINES
molsmitmp = [Chem.MolFromSmiles(x) for x in chempandas.iloc[:,smicol]]
i = 0
molsmi = []
for x in molsmitmp:
if x is not None:
x.SetProp("_Name",chempandas.iloc[i,namecol])
molsmi.append(x)
i += 1
# ATOMPAIRS Fingerprints.
fps = [Pairs.GetAtomPairFingerprintAsBitVect(x) for x in molsmi]
fpsmat = np.matrix(fps)
df = DataFrame(fpsmat,index = [x.GetProp("_Name") for x in molsmi]) # how to name the col?
df['SMILES'] = [Chem.MolToSmiles(x) for x in molsmi]
df['CHEMBL'] = df.index
return(df)
|
cb2babbebd60162c4cc9aa4300dba14cc2cf7ce8
| 3,648,659
|
def filter_by_minimum(X, region):
"""Filter synapses by minimum.
# Arguments:
X (numpy array): A matrix in the NeuroSynapsis matrix format.
# Returns:
numpy array: A matrix in the NeuroSynapsis matrix format.
"""
    vals = np.where((X[:,2] >= region[0])*(X[:,3] >= region[1])*(X[:,4] >= region[2]))[0]
return X[vals,:]
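# Hedged usage sketch (not from the original snippet): keep only rows whose
# coordinate columns 2-4 all meet the per-axis minimum given in `region`.
import numpy as np
X = np.array([[0, 0, 1.0, 2.0, 3.0],
              [1, 0, 5.0, 5.0, 5.0]])
print(filter_by_minimum(X, region=(2.0, 2.0, 3.0)))  # keeps only the second row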
|
6af135d7ecc716c957bf44ed17caab4f9dd63215
| 3,648,660
|
from tqdm import tqdm
import networkx as nx
import numpy as np
def gen_graphs(sizes):
"""
Generate community graphs.
"""
A = []
for V in tqdm(sizes):
G = nx.barabasi_albert_graph(V, 3)
G = nx.to_numpy_array(G)
P = np.eye(V)
np.random.shuffle(P)
A.append(P.T @ G @ P)
return np.array(A)
|
6decd819a5e2afad9744270c616a60c532f2e6fd
| 3,648,661
|
def daemonize(identity: str, kind: str = 'workspace') -> DaemonID:
"""Convert to DaemonID
:param identity: uuid or DaemonID
:param kind: defaults to 'workspace'
:return: DaemonID from identity
"""
try:
return DaemonID(identity)
except TypeError:
return DaemonID(f'j{kind}-{identity}')
|
8cd08d9d8a558b9f78de60d2df96db553fbca8bf
| 3,648,662
|
from typing import Counter
def count_gender(data_list:list):
"""
Contar a população dos gêneros
args:
data_list (list): Lista de dados que possui a propriedade 'Gender'
return (list): Retorna uma lista com o total de elementos do gênero 'Male' e 'Female', nessa ordem
"""
genders = column_to_list(data_list, "Gender")
genders_counter = Counter(genders)
male = genders_counter["Male"]
female = genders_counter["Female"]
return [male, female]
|
9e8a05067a617ca0606eec8d216e25d3f937e097
| 3,648,663
|
async def card_balance(request: Request):
""" 返回用户校园卡余额 """
cookies = await get_cookies(request)
balance_data = await balance.balance(cookies)
return success(data=balance_data)
|
09e2b7e84743a0ed5625a6cc19dda0e97eb6df10
| 3,648,664
|
def _grid_vals(grid, dist_name, scn_save_fs,
mod_thy_info, constraint_dct):
""" efef
"""
# Initialize the lists
locs_lst = []
enes_lst = []
# Build the lists of all the locs for the grid
grid_locs = []
for grid_val_i in grid:
if constraint_dct is None:
grid_locs.append([[dist_name], [grid_val_i]])
else:
grid_locs.append([constraint_dct, [dist_name], [grid_val_i]])
# Get the energies along the grid
for locs in grid_locs:
if scn_save_fs[-1].exists(locs):
scn_path = scn_save_fs[-1].path(locs)
sp_save_fs = autofile.fs.single_point(scn_path)
enes_lst.append(sp_save_fs[-1].file.energy.read(mod_thy_info[1:4]))
locs_lst.append(locs)
return locs_lst, enes_lst
|
a401632285c9ac48136239fcfa7f3c2eb760734c
| 3,648,665
|
import numpy as np
import utool as ut
import vtool as vt
def group_images_by_label(label_arr, gid_arr):
"""
Input: Length N list of labels and ids
Output: Length M list of unique labels, and lenth M list of lists of ids
"""
# Reverse the image to cluster index mapping
labels_, groupxs_ = vt.group_indices(label_arr)
sortx = np.array(list(map(len, groupxs_))).argsort()[::-1]
labels = labels_.take(sortx, axis=0)
groupxs = ut.take(groupxs_, sortx)
label_gids = vt.apply_grouping(gid_arr, groupxs)
return labels, label_gids
|
9bdc83f2a9a5810b5d3cf443dcd72852dd35c26b
| 3,648,666
|
from typing import Optional
def ask_user(prompt: str, default: str = None) -> Optional[str]:
"""
Prompts the user, with a default. Returns user input from ``stdin``.
"""
if default is None:
prompt += ": "
else:
prompt += " [" + default + "]: "
result = input(prompt)
return result if len(result) > 0 else default
|
7803d7e71b2cc3864440cd99c276784cebf81f91
| 3,648,667
|
def tensor_index_by_tuple(data, tuple_index):
"""Tensor getitem by tuple of various types with None"""
if not tuple_index:
return data
op_name = const_utils.TENSOR_GETITEM
tuple_index = _transform_ellipsis_to_slice(data, tuple_index, op_name)
data, tuple_index = _expand_data_dims(data, tuple_index)
min_data_dim, max_data_dim = 1, 8
const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
indexes_types = hyper_map(F.typeof, tuple_index)
contain_type = const_utils.tuple_index_type_cnt(indexes_types, op_name)
if contain_type == const_utils.ALL_BASIC:
return _tensor_getitem_by_tuple_slice(data, tuple_index)
return _tensor_getitem_by_tuple(data, tuple_index, op_name)
|
4a32d9f4028f4ac57d1e523c946bb4a4d349b120
| 3,648,668
|
import scipy
def are_neurons_responsive(spike_times, spike_clusters, stimulus_intervals=None,
spontaneous_period=None, p_value_threshold=.05):
"""
Return which neurons are responsive after specific stimulus events, compared to spontaneous
activity, according to a Wilcoxon test.
:param spike_times: times of spikes, in seconds
:type spike_times: 1D array
:param spike_clusters: spike neurons
:type spike_clusters: 1D array, same length as spike_times
    :param stimulus_intervals: the times of
        the stimulus events onsets and offsets
    :type stimulus_intervals: 2D array
    :param spontaneous_period: the period of spontaneous activity
    :type spontaneous_period: 1D array with 2 elements
:param p_value_threshold: the threshold for the
p value in the Wilcoxon test.
:type p_value_threshold: float
:rtype: 1D boolean array with `n_neurons`
elements (clusters are sorted by increasing cluster
id as appearing in spike_clusters).
"""
stimulus_counts = _get_spike_counts_in_bins(
spike_times, spike_clusters, stimulus_intervals)
# Find spontaneous intervals.
stimulus_durations = np.diff(stimulus_intervals, axis=1).squeeze()
t0, t1 = spontaneous_period
spontaneous_starts = np.linspace(
t0,
t1 - stimulus_durations.max(),
len(stimulus_intervals))
spontaneous_intervals = np.c_[
spontaneous_starts,
spontaneous_starts +
stimulus_durations]
# Count the spontaneous counts.
spontaneous_counts = _get_spike_counts_in_bins(
spike_times, spike_clusters, spontaneous_intervals)
    assert stimulus_counts.shape == spontaneous_counts.shape
# Generate the responsive vector (for every neuron, whether it is
# responsive).
responsive = np.zeros(stimulus_counts.shape[0], dtype=bool)
n_neurons = stimulus_counts.shape[0]
for i in range(n_neurons):
x = stimulus_counts[i, :]
y = spontaneous_counts[i, :]
        try:
            _, p = scipy.stats.wilcoxon(x, y)
        except ValueError:
            # wilcoxon raises when all differences are zero; leave this
            # neuron marked as not responsive.
            continue
        responsive[i] = p < p_value_threshold
return responsive
|
b5b835113644d7c42e7950cc8fc5699e62d631fa
| 3,648,669
|
def _get_book(**keywords):
"""Get an instance of :class:`Book` from an excel source
Where the dictionary should have text as keys and two dimensional
array as values.
"""
source = factory.get_book_source(**keywords)
sheets = source.get_data()
filename, path = source.get_source_info()
return sheets, filename, path
|
2e6114c2948272ce2d342d954594a8d626a45635
| 3,648,670
|
def handler(
state_store: StateStore,
hardware_api: HardwareAPI,
movement_handler: MovementHandler,
) -> PipettingHandler:
"""Create a PipettingHandler with its dependencies mocked out."""
return PipettingHandler(
state_store=state_store,
hardware_api=hardware_api,
movement_handler=movement_handler,
)
|
099532e3c7d51812548643d10758ec23c8354a00
| 3,648,671
|
def get_domain(domain_name):
"""
Query the Rackspace DNS API to get a domain object for the domain name.
Keyword arguments:
domain_name -- the domain name that needs a challenge record
"""
base_domain_name = get_tld("http://{0}".format(domain_name))
domain = rax_dns.find(name=base_domain_name)
return domain
|
5ea1bbe9c73250abf60c5ad9f1d796035ed87654
| 3,648,672
|
import sympy
def lobatto(n):
"""Get Gauss-Lobatto-Legendre points and weights.
Parameters
----------
n : int
Number of points
"""
if n == 2:
return ([0, 1],
[sympy.Rational(1, 2), sympy.Rational(1, 2)])
if n == 3:
return ([0, sympy.Rational(1, 2), 1],
[sympy.Rational(1, 6), sympy.Rational(2, 3), sympy.Rational(1, 6)])
if n == 4:
return ([0, (1 - 1 / sympy.sqrt(5)) / 2, (1 + 1 / sympy.sqrt(5)) / 2, 1],
[sympy.Rational(1, 12), sympy.Rational(5, 12), sympy.Rational(5, 12),
sympy.Rational(1, 12)])
if n == 5:
return ([0, (1 - sympy.sqrt(3) / sympy.sqrt(7)) / 2, sympy.Rational(1, 2),
(1 + sympy.sqrt(3) / sympy.sqrt(7)) / 2, 1],
[sympy.Rational(1, 20), sympy.Rational(49, 180), sympy.Rational(16, 45),
sympy.Rational(49, 180), sympy.Rational(1, 20)])
if n == 6:
return ([0,
(1 - sympy.sqrt(sympy.Rational(1, 3) + (2 * sympy.sqrt(7) / 21))) / 2,
(1 - sympy.sqrt(sympy.Rational(1, 3) - (2 * sympy.sqrt(7) / 21))) / 2,
(1 + sympy.sqrt(sympy.Rational(1, 3) - (2 * sympy.sqrt(7) / 21))) / 2,
(1 + sympy.sqrt(sympy.Rational(1, 3) + (2 * sympy.sqrt(7) / 21))) / 2,
1],
[sympy.Rational(1, 30), (14 - sympy.sqrt(7)) / 60, (14 + sympy.sqrt(7)) / 60,
(14 + sympy.sqrt(7)) / 60, (14 - sympy.sqrt(7)) / 60, sympy.Rational(1, 30)])
if n == 7:
return ([0,
(1 - sympy.sqrt((5 + 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
(1 - sympy.sqrt((5 - 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
sympy.Rational(1, 2),
(1 + sympy.sqrt((5 - 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
(1 + sympy.sqrt((5 + 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
1],
[sympy.Rational(1, 42),
(124 - 7 * sympy.sqrt(15)) / 700,
(124 + 7 * sympy.sqrt(15)) / 700,
sympy.Rational(128, 525),
(124 + 7 * sympy.sqrt(15)) / 700,
(124 - 7 * sympy.sqrt(15)) / 700,
sympy.Rational(1, 42)])
raise NotImplementedError()
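# Hedged usage sketch (not from the original snippet): the 3-point
# Gauss-Lobatto rule on [0, 1]; the weights sum to 1.
points, weights = lobatto(3)
print(points)        # [0, 1/2, 1]
print(sum(weights))  # 1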
|
595610ec035dd9b059ab085375c017856359b837
| 3,648,673
|
def login():
"""Login."""
username = request.form.get('username')
password = request.form.get('password')
if not username:
flask.flash('Username is required.', 'warning')
elif password is None:
flask.flash('Password is required.', 'warning')
else:
user = models.User.login_user(username, password)
if user:
session['user'] = user.username
return flask.redirect(flask.url_for('catalog'))
flask.flash('Invalid username/password.', 'danger')
return flask.redirect(flask.url_for('home'))
|
c54bc743ac305db8f0d74a7bd62f7bb70a952454
| 3,648,674
|
def comp_periodicity(self, p=None):
"""Compute the periodicity factor of the lamination
Parameters
----------
self : LamSlotMulti
A LamSlotMulti object
Returns
-------
per_a : int
Number of spatial periodicities of the lamination
is_antiper_a : bool
True if an spatial anti-periodicity is possible after the periodicities
per_t : int
Number of time periodicities of the lamination
is_antiper_t : bool
True if an time anti-periodicity is possible after the periodicities
"""
if self.sym_dict_enforced is not None:
self.get_logger().debug("Enforcing symmetry for LamSlotMulti")
return (
self.sym_dict_enforced["per_a"],
self.sym_dict_enforced["is_antiper_a"],
self.sym_dict_enforced["per_t"],
self.sym_dict_enforced["is_antiper_t"],
)
else:
Zs = self.get_Zs()
is_aper = False
# TODO compute it
self.get_logger().debug("Symmetry not available yet for LamSlotMulti")
return 1, is_aper, 1, is_aper
|
5880ce1f8e3785d93e2979180133b64c014b9927
| 3,648,675
|
def sha2_384(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_384(), data)
|
7523ca7e2d11e3b686db45a28d09ccdad17ff243
| 3,648,676
|
import string
def cat(arr, match="CAT", upper_bound=None, lower_bound=None):
"""
Basic idea is if a monkey typed randomly, how long would it take for it
to write `CAT`. Practically, we are mapping generated numbers onto the
alphabet.
>"There are 26**3 = 17 576 possible 3-letter words, so the average number of
keystrokes necessary to produce CAT should be around 17 576" [1]
Example
-------
Parameters
----------
word: string or list-type object
All elements of the string must be the same number of characters
match: string or list-type object
The keyword to search for. Other than length, doesn't really matter.
If you pass in a list of strings, it will give you a result for each
passed in string.
upper_bound: int (optional)
Upper bound of random values. If not set, will calculate the minimum
value from the array passed.
lower_bound: int (optional)
Lower bound of random values. If not set, will calculate the maximum
value from the array passed.
Returns
-------
dict
Key is the string passed into match, the value is a list of the
iteration cycles it was found at
Notes
-----
[1]: Marsaglia, G. and Zaman, A., (1995), Monkey tests for random number
generators, Computers & Mathematics with Applications, 9, No. 9, 1–10.
"""
if upper_bound is None:
upper_bound = np.max(arr)
if lower_bound is None:
lower_bound = np.min(arr)
if isinstance(match, str):
match = [match]
match = list(map(str.upper, match))
num_letters = len(match[0])
assert all([len(match_i) == num_letters for match_i in match]), \
"All elements of `match` must have the same number of characters"
n_uppercase = len(string.ascii_uppercase)
# {...number: letter...} mapping
mapping = dict(zip(range(n_uppercase), string.ascii_uppercase))
# Scale the array so that everything is between 0 and 26
arr_norm = np.floor((arr - lower_bound) * (n_uppercase/upper_bound))
# Map the integer component to letters
    letters = [mapping[i] for i in arr_norm.astype(int)]
# Split the array of letters into words
words = chunker(letters, batch_size=num_letters, complete=True)
iter_counts = {match_i: [] for match_i in match}
for i, letter_list in enumerate(words):
word = ''.join(letter_list)
if word in match:
iter_counts[word].append(i)
return iter_counts
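# Hedged usage sketch (not from the original snippet); assumes the `chunker`
# helper called inside `cat` is available in the same module. Counts how many
# of the 3-letter "words" typed by a random monkey spell CAT.
import numpy as np
rng = np.random.default_rng(0)
arr = rng.integers(0, 26, size=300_000)
hits = cat(arr, match="CAT", upper_bound=26, lower_bound=0)
print(len(hits["CAT"]))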
|
1077bc5c4bc989e416cdaf27427f5a617491210d
| 3,648,677
|
def encrypt_data(key: bytes, data: str) -> str:
"""
Encrypt the data
:param key: key to encrypt the data
:param data: data to be encrypted
:returns: bytes encrypted
"""
# instance class
cipher_suite = Fernet(key)
# convert our data into bytes mode
data_to_bytes = bytes(data, "utf-8")
encrypted = cipher_suite.encrypt(data_to_bytes)
return encrypted.decode("utf-8")
|
80e69657987956b3a0dc6d87dee79a4dcc5db3f7
| 3,648,678
|
from typing import List
from typing import Dict
def make_doi_table(dataset: ObservatoryDataset) -> List[Dict]:
"""Generate the DOI table from an ObservatoryDataset instance.
:param dataset: the Observatory Dataset.
:return: table rows.
"""
records = []
for paper in dataset.papers:
# Doi, events and grids
doi = paper.doi.upper()
events = make_doi_events(doi, paper.events)
# Affiliations: institutions, countries, regions, subregion, funders, journals, publishers
institutions = make_doi_institutions(paper.authors)
countries = make_doi_countries(paper.authors)
regions = make_doi_regions(paper.authors)
subregions = make_doi_subregions(paper.authors)
funders = make_doi_funders(paper.funders)
journals = make_doi_journals(paper.journal)
publishers = make_doi_publishers(paper.publisher)
# Make final record
records.append(
{
"doi": doi,
"crossref": {
"title": paper.title,
"published_year": paper.published_date.year,
"published_month": paper.published_date.month,
"published_year_month": f"{paper.published_date.year}-{paper.published_date.month}",
"funder": [{"name": funder.name, "DOI": funder.doi} for funder in paper.funders],
},
"unpaywall": {},
"unpaywall_history": {},
"mag": {},
"open_citations": {},
"events": events,
"affiliations": {
"doi": doi,
"institutions": institutions,
"countries": countries,
"subregions": subregions,
"regions": regions,
"groupings": [],
"funders": funders,
"authors": [],
"journals": journals,
"publishers": publishers,
},
}
)
# Sort to match with sorted results
records.sort(key=lambda r: r["doi"])
return records
|
6347142546579712574a63048e6d778dfd558249
| 3,648,679
|
from typing import List
import os
def get_output_file_path(file_path: str) -> str:
"""
get the output file's path
:param file_path: the file path
:return: the output file's path
"""
split_file_path: List[str] = list(os.path.splitext(file_path))
return f'{split_file_path[0]}_sorted{split_file_path[1]}'
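# Hedged usage sketch (not from the original snippet): the "_sorted" suffix is
# inserted before the file extension.
print(get_output_file_path("data/report.csv"))  # data/report_sorted.csv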
|
baf014ab2587c2b8b3248284e4993a76c502983a
| 3,648,680
|
def binarySearch(arr, val):
    """
    array values must be sorted
    """
    left = 0
    right = len(arr) - 1
    while left <= right:
        half = (left + right) // 2
        if arr[half] == val:
            return half
        if val < arr[half]:
            right = half - 1
        else:
            left = half + 1
    return -1
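# Hedged usage sketch (not from the original snippet): the array must already
# be sorted; -1 is returned for values that are not present.
print(binarySearch([1, 3, 5, 7, 9], 7))  # 3
print(binarySearch([1, 3, 5, 7, 9], 4))  # -1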
|
2457e01dee0f3e3dd988471ca708883d2a612066
| 3,648,681
|
from typing import Union
from re import M
from typing import cast
def foreign_key(
recipe: Union[Recipe[M], str], one_to_one: bool = False
) -> RecipeForeignKey[M]:
"""Return a `RecipeForeignKey`.
Return the callable, so that the associated `_model` will not be created
during the recipe definition.
This resolves recipes supplied as strings from other module paths or from
the calling code's module.
"""
if isinstance(recipe, str):
# Load `Recipe` from string before handing off to `RecipeForeignKey`
try:
# Try to load from another module
recipe = baker._recipe(recipe)
except (AttributeError, ImportError, ValueError):
# Probably not in another module, so load it from calling module
recipe = _load_recipe_from_calling_module(cast(str, recipe))
return RecipeForeignKey(cast(Recipe[M], recipe), one_to_one)
|
f865bf1c7a91a124ca7518a2a2050371112e820e
| 3,648,682
|
def posterize(image, num_bits):
"""Equivalent of PIL Posterize."""
shift = 8 - num_bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
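# Hedged usage sketch (not from the original snippet): keep only the top
# 4 bits of each 8-bit pixel value.
import tensorflow as tf
image = tf.constant([[200, 37], [128, 255]], dtype=tf.uint8)
print(posterize(image, num_bits=4).numpy())  # [[192  32], [128 240]]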
|
11dc20facfd5ac57e7547036304d192ce21fdb0a
| 3,648,683
|
import os
def validate_targetRegionBedFile_for_runType(
value,
field_label,
runType,
reference,
nucleotideType=None,
applicationGroupName=None,
isPrimaryTargetRegion=True,
barcodeId="",
runType_label=ugettext_lazy("workflow.step.application.fields.runType.label"),
):
"""
validate targetRegionBedFile based on the selected reference and the plan's runType
"""
errors = []
value = value.strip() if value else ""
if value:
missing_file = check_uploaded_files(bedfilePaths=[value])
if missing_file:
errors.append("%s : %s not found" % (field_label, value))
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_run() SKIPS validation due to no targetRegion file exists in db. value=%s"
% (value)
)
return errors
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_runType() value=%s; runType=%s; reference=%s; nucleotideType=%s; applicationGroupName=%s"
% (value, runType, reference, nucleotideType, applicationGroupName)
)
if not isPrimaryTargetRegion:
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_run() SKIPS validation due to no validation rules for non-primary targetRegion. value=%s"
% (value)
)
return errors
if reference:
if runType:
runType = runType.strip()
applProducts = ApplProduct.objects.filter(
isActive=True,
applType__runType=runType,
applicationGroup__name=applicationGroupName,
) or ApplProduct.objects.filter(isActive=True, applType__runType=runType)
if applProducts:
applProduct = applProducts[0]
if applProduct:
if (
validation.has_value(value)
and not applProduct.isTargetRegionBEDFileSupported
):
errors.append(
validation.invalid_invalid_related(
field_label, ScientificApplication.verbose_name
)
)
else:
isRequired = (
applProduct.isTargetRegionBEDFileSelectionRequiredForRefSelection
)
if (
isRequired
and not validation.has_value(value)
and not barcodeId
):
# skip for now
if (
runType in ["AMPS_DNA_RNA", "AMPS_HD_DNA_RNA"]
and nucleotideType
and nucleotideType.upper() == "RNA"
):
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_runType() ALLOW MISSING targetRegionBed for runType=%s; nucleotideType=%s"
% (runType, nucleotideType)
)
elif runType in ["AMPS_RNA", "AMPS_HD_RNA"]:
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_runType() ALLOW MISSING targetRegionBed for runType=%s; applicationGroupName=%s"
% (runType, applicationGroupName)
)
else:
errors.append(
validation.invalid_required_related(
field_label, ScientificApplication.verbose_name
)
)
elif value:
if not os.path.isfile(value):
errors.append(
validation.invalid_invalid_value(field_label, value)
)
else:
errors.append(
validation.invalid_invalid_value_related(
runType_label, runType, ScientificApplication.verbose_name
)
)
else:
errors.append(
validation.invalid_required_related(
runType_label, ScientificApplication.verbose_name
)
)
return errors
|
11369d8f74f2eea600676fee5d4deae17250efe7
| 3,648,684
|
def polyFit(x, y):
"""
Function to fit a straight line to data and estimate slope and
intercept of the line and corresponding errors using first order
polynomial fitting.
Parameters
----------
x : ndarray
X-axis data
y : ndarray
Y-axis data
Returns
-------
ndarray
slope, intercept, SDslope, SDintercept
Reference
---------
"""
# Number of input points
N = x.shape[0]
# Estimate slope and intercept of fitted line
slope, intercept = np.polyfit(x, y, 1)
# Calculate standard deviation of slope and intercept
yhat = intercept + slope * x
residual = y - yhat
Sx2 = np.sum(x**2)
Sxx = np.sum((x - np.mean(x))**2)
Sy_x = np.sqrt(np.sum(residual**2) / (N -2))
SDslope = Sy_x / np.sqrt(Sxx)
SDintercept = Sy_x * np.sqrt(Sx2 / (N * Sxx))
return np.array([[slope, intercept], [SDslope, SDintercept]])
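# Hedged usage sketch (not from the original snippet): fit a noisy straight
# line and unpack the 2x2 result into estimates and their standard deviations.
import numpy as np
x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.5, size=x.shape)
(slope, intercept), (sd_slope, sd_intercept) = polyFit(x, y)
print(slope, intercept, sd_slope, sd_intercept)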
|
0acc032492a63dbb271293bed8cf11c800621a35
| 3,648,685
|
import tdub.internal.stab_tests as tist
from pathlib import Path
import os
import matplotlib.pyplot as plt
def stability_test_standard(
umbrella: Path,
outdir: Path | None = None,
tests: str | list[str] = "all",
) -> None:
"""Perform a battery of standard stability tests.
This function expects a rigid `umbrella` directory structure,
based on the output of results that are generated by rexpy_.
.. _rexpy: https://github.com/douglasdavis/rexpy
Parameters
----------
umbrella : pathlib.Path
Umbrella directory containing all fits run via rexpy's
standard fits.
outdir : pathlib.Path, optional
Directory to save results (defaults to current working
directory).
tests : str or list(str)
Which tests to execute. (default is "all"). The possible tests
include:
* ``"sys-drops"``, which shows the stability test for dropping
some systematics.
* ``"indiv-camps"``, which shows the stability test for
limiting the fit to individual campaigns.
* ``"regions"``, which shows the stability test for limiting
the fit to subsets of the analysis regions.
* ``"b0-check"``, which shows the stability test for limiting
the fit to individual analysis regions and checking the B0
eigenvector uncertainty.
"""
umbrella = umbrella.resolve()
curdir = Path.cwd().resolve()
if outdir is None:
outdir = curdir
else:
outdir.mkdir(parents=True, exist_ok=True)
if tests == "all":
tests = ["sys-drops", "indiv-camps", "regions", "b0-check"]
os.chdir(outdir)
if "sys-drops" in tests:
nom, names, labels, vals = tist.excluded_systematics_delta_mu_summary(
umbrella / "main.force-data.d" / "tW"
)
fig, ax = plt.subplots(figsize=(5.2, 1.5 + len(names) * 0.315))
fig.subplots_adjust(left=0.50, right=0.925)
tist.make_delta_mu_plot(
ax, nom.sig_hi, nom.sig_lo, vals["c"], vals["d"], vals["u"], labels
)
fig.savefig("stability-tests-sys-drops.pdf")
if "indiv-camps" in tests:
nom, names, labels, vals = tist.indiv_camp_delta_mu_summary(umbrella)
fig, ax = plt.subplots(figsize=(5.2, 1.5 + len(names) * 0.315))
fig.subplots_adjust(left=0.350, right=0.925, bottom=0.3, top=0.99)
tist.make_delta_mu_plot(
ax, nom.sig_hi, nom.sig_lo, vals["c"], vals["d"], vals["u"], labels
)
fig.savefig("stability-tests-indiv-camps.pdf")
if "regions" in tests:
nom, names, labels, vals = tist.region_delta_mu_summary(umbrella)
fig, ax = plt.subplots(figsize=(5.2, 1.5 + len(names) * 0.315))
fig.subplots_adjust(left=0.350, right=0.925, bottom=0.3, top=0.99)
tist.make_delta_mu_plot(
ax, nom.sig_hi, nom.sig_lo, vals["c"], vals["d"], vals["u"], labels
)
fig.savefig("stability-tests-regions.pdf")
if "b0-check" in tests:
fig, ax = tist.b0_by_year_fig_and_ax(umbrella)
fig.subplots_adjust(left=0.350, right=0.925, bottom=0.3, top=0.8)
fig.savefig("stability-tests-b0-check.pdf")
os.chdir(curdir)
return None
|
91b9d7d059a9c60e9079529da65fe44dcc6bcbe4
| 3,648,686
|
def evaluate_error(X, y, w):
"""Returns the mean squared error.
X : numpy.ndarray
Numpy array of data.
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of
rows in `X`.
w : numpy.ndarray
Numpy array with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
Returns
-------
float
The mean squared error
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
y_predict = X_b.dot(w)
dist = (y - y_predict) ** 2
return float(np.sum(dist)) / X.shape[0]
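# Hedged usage sketch (not from the original snippet): a perfect linear fit
# gives a mean squared error of zero.
import numpy as np
X = np.array([[1.0], [2.0], [3.0]])
y = np.array([[2.0], [4.0], [6.0]])
w = np.array([[0.0], [2.0]])  # intercept 0, slope 2
print(evaluate_error(X, y, w))  # 0.0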
|
2e54a2bb64a590e3e35456b5039b4cfce7632c0f
| 3,648,687
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up ha_reef_pi from a config entry."""
websession = async_get_clientsession(hass)
coordinator = ReefPiDataUpdateCoordinator(hass, websession, entry)
await coordinator.async_config_entry_first_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
undo_listener = entry.add_update_listener(update_listener)
hass.data[DOMAIN][entry.entry_id] = {
"coordinator": coordinator,
"undo_update_listener": undo_listener,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
|
655a1265a77cec38346931a6cf5feaf923c1573b
| 3,648,688
|
def get_list(client):
"""
"""
request = client.__getattr__(MODULE).ListIpBlocks()
response, _ = request.result()
return response['results']
|
0836bc58d8108a804c9464a713184ac582bd4e90
| 3,648,689
|
import six
def file_asset(class_obj):
"""
Decorator to annotate the FileAsset class. Registers the decorated class
as the FileAsset known type.
"""
assert isinstance(class_obj, six.class_types), "class_obj is not a Class"
global _file_asset_resource_type
_file_asset_resource_type = class_obj
return class_obj
|
a21ae9d8ba84d2f6505194db6fd8bd84593f3928
| 3,648,690
|
import glob
import os
def get_bam_list(args):
"""
Retrieve bam list from given tumor bam directory
"""
bamList = []
for bam in glob.glob(os.path.join(args.tumor_bams_directory, "*.bam")):
# Todo: CMO bams don't always end in 'T'
# if os.path.basename(bam).split('_')[0].split('-')[-1].startswith('T'):
bamList.append(bam)
return bamList
|
151a4fd90164277ef38cf09d2f7243f3b204d2e9
| 3,648,691
|
def scheming_field_by_name(fields, name):
"""
Simple helper to grab a field from a schema field list
based on the field name passed. Returns None when not found.
"""
for f in fields:
if f.get('field_name') == name:
return f
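# Hedged usage sketch (not from the original snippet): returns the matching
# field dict, or None when the field name is absent.
fields = [{"field_name": "title"}, {"field_name": "notes", "label": "Notes"}]
print(scheming_field_by_name(fields, "notes"))    # {'field_name': 'notes', 'label': 'Notes'}
print(scheming_field_by_name(fields, "missing"))  # None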
|
ba4d04585b12ab941db8bc0787b076c32e76cadb
| 3,648,692
|
import os
import re
def saveResult(img_file, img, boxes, dirname='./result/', verticals=None, texts=None):
""" save text detection result one by one
Args:
img_file (str): image file name
img (array): raw image context
boxes (array): array of result file
Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output
Return:
None
"""
img = np.array(img)
#print("image shape:", img.shape)
height = img.shape[0]
width = img.shape[1]
#img = clustering(img)
alpha = 1.25 # Contrast control (1.0-3.0)
beta = 0# Brightness control (0-100)
img= cv2.convertScaleAbs(img, alpha=alpha, beta=beta)
    # Defaults for the fields returned at the end, so a missing detection
    # does not leave a name unbound.
    DoB_text = curp = clave = registro = vigencia = emisión = ""
    sexo = municipio = estado = ""
    DoB_point = (0, 0)
    y_b = 0
    # make result file list
    filename, file_ext = os.path.splitext(os.path.basename(img_file))
# result directory
res_file = dirname + "res_" + filename + '.txt'
res_img_file = dirname + "res_" + filename + '.jpg'
if not os.path.isdir(dirname):
os.mkdir(dirname)
# ignore top bboxes
boxes = boxes[2:]
# enlist top left corner of bboxes
top_l_points = []
textd = []
#texts = [i for i, _ in enumerate(boxes)]
with open(res_file, 'w') as f:
for i, box in enumerate(boxes):
poly = np.array(box).astype(np.int32).reshape((-1))
strResult = ','.join([str(p) for p in poly]) + '\r\n'
f.write(strResult)
#### these points contain edges of boxes or polygons, dependin
## on argument of SaveResult
poly = poly.reshape(-1, 2)
#### these points contain edges of boxes ##
#x, y, w, h = poly[0][0 ], poly[0][1], poly[2][0], poly[2][1]
# draw first point of box
#cv2.circle(img, (poly[0][0], poly[0][1]), 6, (255,0,0), 2)
# x, y = tuple(poly[0])
# w, h = tuple(poly[2])
y, x = tuple(poly[0])
h, w = tuple(poly[2])
#print(f"Coordinates are x {x}, y {y}, w {w}, h {h}")
img_copy = img.copy()
cropped_boxes = img_copy[int(min(x,w)-4):int(max(x,w)+4), int(min(y,h)-4):int(max(y,h)+4)]
#print("cropped boxes: ",cropped_boxes )
#print("min and max (w,x), min and max (y,h)",
if cropped_boxes is not None:
cv2.imwrite("saved_{}_box.png".format(i), cropped_boxes)
dilated_img = cv2.dilate(cropped_boxes[:,:,1], np.ones((33,33 ), np.uint8))
#bg_img = cv2.GaussianBlur(dilated_img, (9,9),0)
bg_img = cv2.medianBlur(dilated_img, 11)
#--- finding absolute difference to preserve edges ---
diff_img = 255 - cv2.absdiff(cropped_boxes[:,:,1], bg_img)
#--- normalizing between 0 to 255 ---
norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
#--- Otsu threshold ---
#th = cv2.adaptiveThreshold(norm_img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
cropped_boxes = cv2.threshold(norm_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
#cropped_boxes = clustering(cropped_boxes)
text = pytesseract.image_to_string(cropped_boxes, lang="spa", config='--psm 6')
#print("text by crop box {}".format(i), text)
top_l_points.append(tuple(poly[0]))
text_w_n_blanks = text.strip()
textd.append(text_w_n_blanks)
# Check where DoB is
check_dob = re.search("[0-9]{1,2}(/)[0-9]{1,2}(/)[0-9]{4}",
text_w_n_blanks)
if check_dob:
x_b, y_b = tuple(poly[0])
DoB_point = tuple(poly[0])
DoB_text = text
#print("DoB point: ", poly[0])
print("DoB: ", DoB_text)
# Check where curp is
check_curp = re.search("[a-zA-Z]{4}[0-9]{6}[a-zA-Z]{4}", text_w_n_blanks)
if check_curp:
curp = text.split(" ")[-1]
print("curp: ", curp)
# Check where clave de elector is
check_clave = re.search("[a-zA-Z]{6}[0-9]{8}[a-zA-Z]{1}", text_w_n_blanks)
if check_clave:
clave = text.split(" ")[-1]
print("clave: ", clave)
# Check where registro is
check_registro= re.search("[0-9]{4}( )[0-9]{2}", text_w_n_blanks)
if check_registro:
registro1 = text.split(" ")[-2:-1][0]
registro2 = text.split(" ")[-1]
registro = registro1+" "+registro2
print("registro: ", registro1, registro2)
# Check emisión and vigencia
#check_vig_emision= re.search("[0-9]{4}( )[a-zA-Z]{8}( )[0-9]{4}",
# text_w_n_blanks)
vig = text_w_n_blanks.split(" ")[-1]
emi = text_w_n_blanks.split(" ")[0]
check_vig= re.search("[0-9]{4}", vig)
check_emi= re.search("[0-9]{4}", emi)
if check_vig and check_emi:
print("vigencia: ", vig)
print("emisión: ", emi)
vigencia = vig
emisión = emi
# check if sexo
if "sex" in text_w_n_blanks.lower():
sexo = text.split(" ")[-1]
print("sexo check", sexo)
if "m" in sexo.lower():
sexo = "M"
print("sexo: ",sexo)
else:
sexo = "H"
print("sexo: ",sexo)
#print("sexo: ", sexo)
# check if municipio
if "munici" in text_w_n_blanks.lower():
municipio = text.split(" ")[-1]
print("municipio: ", municipio)
if "esta" in text_w_n_blanks.lower():
estado = text.split(" ")[-1]
print("estado: ", estado)
#print("debug", text_w_n_blanks)
# all text is lowercase
text_w_n_blanks = text_w_n_blanks.lower()
#print(text_w_n_blanks)
print(DoB_point, DoB_text)
name_dicts = dict(zip(textd, top_l_points))
#print("name_dicts: ", name_dicts)
#print("DoB_point:", DoB_point)
for k, v in name_dicts.copy().items():
if v == tuple(DoB_point):
#print(k ,"value deleted")
del name_dicts[k]
top_l_points.remove(tuple(DoB_point))
## gets the nearest y coordinate initial bounding point
name_dicts0= {k:tuple(map(lambda i, j:
i - j, v, tuple(DoB_point))) for k, v in name_dicts.items() }
#print(name_dicts0)
for x,y in top_l_points:
if y < y_b+(0.015*height) and y > y_b-(0.015*height) :
# if y < y_b+15 and y > y_b-15 :
NamePoint = x,y
#print(NamePoint)
distances_list = []
for point in top_l_points:
distances_list.append(distance(point, NamePoint))
#print( distances_list)
for k, v in name_dicts.copy().items(): # (for Python 2.x)
if v == NamePoint:
PrimerApellido = k
#print("Primer apellido", k)
name_dicts2= {k:tuple(map(lambda i, j:
i - j, v, NamePoint)) for k, v in name_dicts.items() }
#print(name_dicts2)
dist_dict = {k:distance((0,0),v) for k,v in name_dicts2.items()}
#print(dist_dict)
sorted_dist_dict = {k: v for k, v in sorted(dist_dict.items(),
key=lambda item: item[1])}
#print(sorted_dist_dict)
## get the next two items (they are in ordered by the tuple)
## and should be the next two bounding boxes
names_list= list(sorted_dist_dict.keys())[:5]
names_list = [name for name in names_list
if "DOMICI" not in name]
names_list = [name for name in names_list
if "NOM" not in name]
names_list = [name for name in names_list
if "CREDENCIAL" not in name]
Domicilio_list= list(sorted_dist_dict.keys())[5:10]
#print(Domicilio_list)
Domicilio_list = [name for name in Domicilio_list
if "DOMICI" not in name]
Domicilio_list = [name for name in Domicilio_list
if "MÉXICO" not in name]
Domicilio_list = [name for name in Domicilio_list
if "CREDENCIAL" not in name]
Domicilio_list = [name for name in Domicilio_list
if "ELECTOR" not in name]
Domicilio_list = [name for name in Domicilio_list
if "CLAVE" not in name]
Domicilio_list = [name for name in Domicilio_list
if "cur" not in name]
domicilio_list_str = ' '.join([str(elem) for elem in Domicilio_list])
#print("names_list: ",names_list)
names_list_str = ' '.join([str(elem) for elem in names_list])
print()
print("Nombre completo: ", names_list_str)
print("Domicilio completo: ", domicilio_list_str)
#print("Fecha de nacimiento:", DoB_text)
# Save result image
cv2.imwrite(res_img_file, img)
return {"nombre": names_list_str, "fecha_de_nacimiento":DoB_text.strip(),
"sexo": sexo, "domicilio":domicilio_list_str,"clave_de_elector": clave.strip(),
"CURP": curp.strip(), "registro":registro.strip(), "numero_de_emisión":emisión,
"estado": estado.strip(), "municipio": municipio.strip(), "vigencia":vigencia}
#, "seccion": seccion}
|
10f68d8411f9f0a5fb9d0d4082960f6163dad7b3
| 3,648,693
|
def merge_sort(items):
"""Sorts a list of items.
Uses merge sort to sort the list items.
Args:
items: A list of items.
Returns:
The sorted list of items.
"""
n = len(items)
if n < 2:
return items
m = n // 2
left = merge_sort(items[:m])
right = merge_sort(items[m:])
return merge(left, right)
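# Hedged usage sketch (not from the original snippet); `merge` is assumed to
# combine two already-sorted lists, e.g. this minimal version:
def merge(left, right):
    result, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    return result + left[i:] + right[j:]

print(merge_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]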
|
d42c60dda40fc421adef2d47f302426d7c176ba1
| 3,648,694
|
from typing import Optional
def hunk_boundary(
hunk: HunkInfo, operation_type: Optional[str] = None
) -> Optional[HunkBoundary]:
"""
Calculates boundary for the given hunk, returning a tuple of the form:
(<line number of boundary start>, <line number of boundary end>)
If operation_type is provided, it is used to filter down only to lines whose line_type matches
the operation_type. Possible values: "+", "-", None.
If there are no lines of the given type in the hunk, returns None.
"""
line_type_p = lambda line: True
if operation_type is not None:
line_type_p = lambda line: line.line_type == operation_type
admissible_lines = [line for line in hunk.lines if line_type_p(line)]
if not admissible_lines:
return None
return HunkBoundary(
operation_type=operation_type,
start=admissible_lines[0].new_line_number,
end=admissible_lines[-1].new_line_number,
)
|
c5ec0065e5ab85652a5d0d679a832fdd0dae1629
| 3,648,695
|
from pm4py.statistics.attributes.pandas import get as pd_attributes_filter
from pm4py.statistics.attributes.log import get as log_attributes_filter
def get_activities_list(log, parameters=None):
"""
Gets the activities list from a log object, sorted by activity name
Parameters
--------------
log
Log
parameters
Possible parameters of the algorithm
Returns
-------------
activities_list
List of activities sorted by activity name
"""
if parameters is None:
parameters = {}
activity_key = parameters[
constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if constants.PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else xes.DEFAULT_NAME_KEY
if type(log) is pd.DataFrame:
activities = pd_attributes_filter.get_attribute_values(log, activity_key)
else:
activities = log_attributes_filter.get_attribute_values(log, activity_key)
return sorted(list(activities.keys()))
|
b0e335dd31cae3fb291317a559c721199217605f
| 3,648,696
|
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_vocab_size,
encoding_embedding_size):
"""
:return: tuple (RNN output, RNN state)
"""
embed = tf.contrib.layers.embed_sequence(rnn_inputs,
vocab_size=source_vocab_size,
embed_dim=encoding_embedding_size)
stacked_cells = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(rnn_size), keep_prob) for _ in range(num_layers)])
outputs, state = tf.nn.dynamic_rnn(stacked_cells,
embed,
dtype=tf.float32)
return outputs, state
|
3179d478478e2c7ca5d415bb23643a836127f6fe
| 3,648,697
|
def point_selection(start, end, faces):
""" Calculates the intersection points between a line segment and triangle mesh.
:param start: line segment start point
:type start: Vector3
:param end: line segment end point
:type end: Vector3
:param faces: faces: N x 9 array of triangular face vertices
:type faces: numpy.ndarray
:return: array of intersection points
:rtype: numpy.ndarray
"""
direction = end - start
length = direction.length
if length < eps or faces is None:
return np.array([])
direction /= length
distances = segment_triangle_intersection(start, direction, length, faces)
if not distances:
return np.array([])
distances = np.reshape(distances, (len(distances), 1))
return start + direction * distances
|
287295de09a5375118ed050f025fa32b0690a9a9
| 3,648,698
|
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
"""Returns the number of days between year1/month1/day1
and year2/month2/day2. Assumes inputs are valid dates
in Gregorian calendar, and the first date is not after
the second."""
month = month2
year = year2
day = day2 - day1
if (day < 0):
day += 30
month -= 1
month = month - month1
if (month < 0):
month += 12
year -= 1
year = year - year1
return (year * 360) + month * 30 + day
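# Hedged worked example (not from the original snippet); the 30-day-month
# convention means the result can differ from the true calendar day count.
print(daysBetweenDates(2012, 1, 1, 2012, 2, 28))  # 30 + 27 = 57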
|
687a7ff0b29ec2a931d872c18057741d93571ac1
| 3,648,699
|