content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from datetime import datetime
def testjob(request):
    """
    Handler for the test-job request issued by the Elastic Beanstalk
    worker-tier SQS daemon; appends the request details to
    /tmp/testjob.log and returns 204 (No Content).

    Actual result from beanstalk instance:
    * testjob triggerd at 2019-11-14 01:02:00.105119
    [headers]
    - Content-Type : application/json
    - User-Agent : aws-sqsd/2.4
    - X-Aws-Sqsd-Msgid : 6998edf8-3f19-4c69-92cf-7c919241b957
    - X-Aws-Sqsd-Receive-Count : 4
    - X-Aws-Sqsd-First-Received-At : 2019-11-14T00:47:00Z
    - X-Aws-Sqsd-Sent-At : 2019-11-14T00:47:00Z
    - X-Aws-Sqsd-Queue : awseb-e-n23e8zdd3w-stack-AWSEBWorkerQueue-1QZHOZ650P0J0
    - X-Aws-Sqsd-Path : /testjob
    - X-Aws-Sqsd-Sender-Id : AROA2XEFXCLXVWYXRGF4D:i-07f157f85fb97a241
    - X-Aws-Sqsd-Scheduled-At : 2019-11-14T00:47:00Z
    - X-Aws-Sqsd-Taskname : testjob
    - Connection : close
    - Host : localhost
    - Content-Length : 0
    [body]
    b''
    """
    with open("/tmp/testjob.log", "a") as f:
        f.write("\n\n")
        # `datetime` is the class imported via `from datetime import datetime`,
        # so call now() on it directly; the original `datetime.datetime.now()`
        # raised AttributeError under that import.
        f.write(f"* testjob triggerd at {datetime.now()}\n")
        f.write("[headers]\n")
        for key, value in request.headers.items():
            f.write(f"- {key} : {value}\n")
        f.write("[body]\n")
        f.write(str(request.body))
    return HttpResponse(status=204)
import argparse
def args_parser_test():
    """Build and return the argparse parser used when testing a model."""
    test_parser = argparse.ArgumentParser()
    test_parser.add_argument(
        '--architecture', type=str, metavar='arch', required=True,
        help='neural network architecture [vgg19, resnet50]')
    test_parser.add_argument(
        '--dataset', type=str, required=True,
        help='dataset [cifar10, cifar100, svhn, fashionmnist]')
    test_parser.add_argument(
        '--batch-size', type=int, default=512,
        help='input batch size for training (default: 512)')
    test_parser.add_argument(
        '--model-path', type=str, required=True,
        help='path to the model for finding test accuracy')
    return test_parser
from datetime import datetime
def todatetime(mydate):
    """Convert the given thing to a datetime.datetime.

    This is intended mainly to be used with the mx.DateTime that psycopg
    sometimes returns, but could be extended in the future to take other
    types.

    :param mydate: a datetime, a falsy value (returned unchanged), or a
        timestamp-like value accepted by ``datetime.fromtimestamp``.
    :return: a ``datetime`` instance, or ``mydate`` itself when it is
        already a datetime or falsy (e.g. ``None``).
    """
    # `datetime` here is the class from `from datetime import datetime`;
    # the original referenced datetime.datetime, which raises
    # AttributeError under that import.
    if isinstance(mydate, datetime):
        return mydate  # Already a datetime
    if not mydate:
        return mydate  # maybe it was None
    # this works for mx.DateTime without requiring us to explicitly
    # check for mx.DateTime (which is annoying if it may not even be installed)
    return datetime.fromtimestamp(mydate)
from datetime import datetime
def generate_datetime(time: str) -> datetime:
    """Build a datetime combining today's date with the given time string."""
    date_part: str = datetime.now().strftime("%Y%m%d")
    return parse_datetime(f"{date_part} {time}")
def get_rgb_scores(arr_2d=None, truth=None):
    """
    Return an RGB image showing the pixelwise agreement between a predicted
    binary image and the ground truth, with different color codes.
    Easy when needed to inspect a segmentation result against ground truth.

    Colour code (for pixels valued exactly 0/255):
      * white -- predicted 255, truth 255 (match)
      * green -- predicted 255, truth 0
      * red   -- predicted 0,   truth 255
      * black -- everything else (zero initialisation)

    :param arr_2d: 2D predicted binary image (values 0/255).
    :param truth: 2D ground-truth binary image (values 0/255).
    :return: (H, W, 3) uint8 RGB image.
    """
    arr_rgb = np.zeros([arr_2d.shape[0], arr_2d.shape[1], 3], dtype=np.uint8)
    pred_on = arr_2d == 255
    truth_on = truth == 255
    # Vectorised boolean masks replace the original per-pixel double loop
    # (same result, O(H*W) numpy ops instead of Python-level iteration).
    arr_rgb[pred_on & truth_on] = (255, 255, 255)
    arr_rgb[pred_on & (truth == 0)] = (0, 255, 0)
    arr_rgb[(arr_2d == 0) & truth_on] = (255, 0, 0)
    return arr_rgb
def calClassMemProb(param, expVars, classAv):
    """
    Compute the class-membership probability of every observation.

    Parameters
    ----------
    param : 1D numpy array of size nExpVars.
        Parameter values of the class membership model.
    expVars : 2D numpy array of size (nExpVars x (nDms * nClasses)).
        Explanatory variables of the class membership model.
    classAv : sparse matrix of size ((nDms * nClasses) x nDms).
        The (i, j)th element equals 1 if the ith row in expVars corresponds
        to the jth decision-maker, and 0 otherwise.

    Returns
    -------
    p : 2D numpy array of size 1 x (nDms * nClasses).
        Class membership probability for each individual and each
        available latent class.
    """
    utility = np.dot(param[None, :], expVars)       # 1 x (nDms * nClasses)
    exp_utility = np.exp(utility)
    # Clamp the exponentials against machine overflow/underflow.
    exp_utility[np.isinf(exp_utility)] = 1e+20
    exp_utility[exp_utility < 1e-200] = 1e-200
    # Sum the exponentials over the classes of each decision-maker ...
    per_dm_total = exp_utility * classAv            # 1 x nDms
    # ... and broadcast that total back to one entry per (dm, class) row.
    denom = classAv * np.transpose(per_dm_total)    # (nDms * nClasses) x 1
    p = np.divide(exp_utility, np.transpose(denom))  # 1 x (nDms * nClasses)
    p[np.isinf(p)] = 1e-200                          # class unavailable
    return p
from typing import List
def detect_statistical_outliers(
    cloud_xyz: np.ndarray, k: int, std_factor: float = 3.0
) -> List[int]:
    """
    Determine the indexes of the points of cloud_xyz to filter.

    The removed points have mean distances with their k nearest neighbors
    that are greater than a distance threshold (dist_thresh). This
    threshold is computed from the mean (mean_distances) and standard
    deviation (stddev_distances) of all the points' mean distances with
    their k nearest neighbors:

        dist_thresh = mean_distances + std_factor * stddev_distances

    :param cloud_xyz: (N, 3) array of point coordinates
    :param k: number of neighbors
    :param std_factor: multiplication factor to use
        to compute the distance threshold
    :return: list of the points to filter indexes
    """
    cloud_tree = cKDTree(cloud_xyz)
    # Query k + 1 neighbors: every point is its own nearest neighbor
    # (at distance zero).
    neighbors_distances, _ = cloud_tree.query(cloud_xyz, k + 1)
    # The self-distance contributes zero to each row's sum, so dividing
    # by k yields the mean distance to the k true neighbors.
    mean_neighbors_distances = np.sum(neighbors_distances, axis=1) / k
    # Mean and standard deviation of those mean distances over the cloud.
    mean_distances = np.mean(mean_neighbors_distances)
    stddev_distances = np.std(mean_neighbors_distances)
    dist_thresh = mean_distances + std_factor * stddev_distances
    # flatnonzero replaces the original argwhere + manual flattening loop.
    return np.flatnonzero(mean_neighbors_distances > dist_thresh).tolist()
def collinear(cell1, cell2, column_test):
    """Determines whether the given cells are collinear along a dimension.

    Returns True if the given cells are in the same row
    (column_test=False) or in the same column (column_test=True).

    Args:
      cell1: The first geocell string.
      cell2: The second geocell string.
      column_test: A boolean; False performs a row collinearity test,
          True performs a column collinearity test.

    Returns:
      A bool indicating whether or not the given cells are collinear in
      the given dimension.
    """
    # zip stops at the shorter cell, matching the original min-length loop.
    for ch1, ch2 in zip(cell1, cell2):
        x1, y1 = _subdiv_xy(ch1)
        x2, y2 = _subdiv_xy(ch2)
        if column_test:
            # Column collinearity: all x coordinates must agree.
            if x1 != x2:
                return False
        elif y1 != y2:
            # Row collinearity: all y coordinates must agree.
            return False
    return True
def plasma_fractal(mapsize=512, wibbledecay=3):
    """Generate a heightmap using diamond-square algorithm.

    Modification of the algorithm in
    https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py

    Args:
        mapsize: side length of the heightmap, must be a power of two.
        wibbledecay: integer, decay factor.

    Returns:
        numpy 2d array, side length 'mapsize', of floats normalised to
        [0, 1] (the original docstring claimed [0, 255], but the final
        min-subtract / max-divide normalises to [0, 1]).
    """
    if mapsize & (mapsize - 1) != 0:
        raise ValueError('mapsize must be a power of two.')
    # np.float64 replaces np.float_, an alias removed in NumPy 2.0.
    maparray = np.empty((mapsize, mapsize), dtype=np.float64)
    maparray[0, 0] = 0
    stepsize = mapsize
    wibble = 100
    def wibbledmean(array):
        return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
    def fillsquares():
        """For each square, calculate middle value as mean of points + wibble."""
        cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
        squareaccum += np.roll(squareaccum, shift=-1, axis=1)
        maparray[stepsize // 2:mapsize:stepsize,
                 stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
    def filldiamonds():
        """For each diamond, calculate middle value as mean of points + wibble."""
        mapsize = maparray.shape[0]
        drgrid = maparray[stepsize // 2:mapsize:stepsize,
                          stepsize // 2:mapsize:stepsize]
        ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
        lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
        ltsum = ldrsum + lulsum
        maparray[0:mapsize:stepsize,
                 stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
        tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
        tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
        ttsum = tdrsum + tulsum
        maparray[stepsize // 2:mapsize:stepsize,
                 0:mapsize:stepsize] = wibbledmean(ttsum)
    while stepsize >= 2:
        fillsquares()
        filldiamonds()
        stepsize //= 2
        wibble /= wibbledecay
    maparray -= maparray.min()
    return maparray / maparray.max()
import pwd
import grp
import time
def stat_to_longname(st, filename):
    """
    Some clients (FileZilla, I'm looking at you!)
    require 'longname' field of SSH2_FXP_NAME
    to be 'alike' to the output of ls -l.
    So, let's build it!
    Encoding side: unicode sandwich.

    :param st: stat-like object (os.stat result or paramiko SFTPAttributes)
    :param filename: file name as bytes
    :return: the assembled ls-style line, encoded as bytes
    """
    try:
        n_link = str(st.st_nlink)
    except AttributeError:
        # Some stats (e.g. SFTPAttributes of paramiko) don't have st_nlink;
        # the original bare `except:` was narrowed to the error it guards.
        n_link = '1'
    longname = [
        filemode(st.st_mode).decode(),
        n_link,
        pwd.getpwuid(st.st_uid)[0],
        grp.getgrgid(st.st_gid)[0],
        str(st.st_size),
        time.strftime('%b %d %H:%M', time.gmtime(st.st_mtime)),
    ]
    # add needed padding (module-level _paddings gives each column width)
    longname = [
        field + ' ' * (_paddings[i] - len(field))
        for i, field in enumerate(longname)
    ]
    longname.append(filename.decode())  # append the filename
    # and return the string
    return ' '.join(longname).encode()
def driver():
    """Read the input, run solve() and print the result.

    :return: result - Result of computation.
    """
    count = int(input())
    # gather the next `count` input lines for solve()
    lines = [input() for _ in range(count)]
    outcome = solve(count, lines)
    print(outcome)
    return outcome
def load_element_different(properties, data):
    """
    Load elements which include lists of different lengths
    based on the element's property-definitions.

    Parameters
    ------------
    properties : dict
      Property definitions encoded in a dict where the property name is
      the key and the property data type the value.
    data : array
      Data rows for this element.
    """
    collected = {name: [] for name in properties}
    for row in data:
        cursor = 0
        for name, dtype in properties.items():
            count = 1
            if '$LIST' in dtype:
                dtype = dtype.split('($LIST,)')[-1]
                # a list property stores its element count as the first entry
                count = int(row[cursor])
                # skip that count entry when reading the actual values
                cursor += 1
            stop = cursor + count
            collected[name].append(row[cursor:stop].astype(dtype))
            # the next property starts where this one ended
            cursor = stop
    # collapse each property's rows into a squeezed numpy array
    return {name: np.array(values).squeeze()
            for name, values in collected.items()}
def start_survey():
    """Reset any stored survey responses and redirect to the first question."""
    # Flask's session holds small, temporary per-user state; clearing the
    # response list here restarts the survey from scratch.
    session[RESPONSES_KEY] = []
    return redirect("/questions/0")
def page(token):
    """``page`` property validation."""
    if token.type != 'ident':
        return None
    if token.lower_value == 'auto':
        return 'auto'
    return token.value
import os
def create_fsns_label(image_dir, anno_file_dirs):
    """Get image paths and annotations from tab-separated annotation files.

    Each annotation line has the form ``<file_name>\\t<label>``. Lines whose
    image file does not exist are reported and skipped; lines with an empty
    label are skipped silently.

    Args:
        image_dir: directory containing the images.
        anno_file_dirs: iterable of annotation file paths.

    Returns:
        (images, image_files_dict, image_anno_dict): the list of assigned
        image ids, an id -> image path mapping, and an id -> label mapping.

    Raises:
        ValueError: if ``image_dir`` is not a directory.
    """
    if not os.path.isdir(image_dir):
        raise ValueError(f'Cannot find {image_dir} dataset path.')
    image_files_dict = {}
    image_anno_dict = {}
    images = []
    img_id = 0
    for anno_file_dir in anno_file_dirs:
        # Context manager replaces the original `open(...).readlines()`,
        # which leaked the file handle; iterating the handle also avoids
        # loading the whole file into memory.
        with open(anno_file_dir, 'r') as anno_file:
            for line in anno_file:
                parts = line.split('\t')
                file_name = parts[0]
                labels = parts[1].split('\n')[0]
                image_path = os.path.join(image_dir, file_name)
                if not os.path.isfile(image_path):
                    print(f'Cannot find image {image_path} according to annotations.')
                    continue
                if labels:
                    images.append(img_id)
                    image_files_dict[img_id] = image_path
                    image_anno_dict[img_id] = labels
                    img_id += 1
    return images, image_files_dict, image_anno_dict
def index():
    """
    Gets the the weight data and displays it to the user.
    """
    # Base query: current member's entries, newest first.
    base_query = Weight.query.filter_by(member=current_user).order_by(Weight.id.desc())
    all_weight_data = base_query.all()
    # Last five entries feed the chart.
    recent_entries = base_query.limit(5).all()
    # Reverse so the newest data point ends up on the right of the graph.
    chart_data = [entry.get_weight() for entry in recent_entries][::-1]
    label_data = [entry.get_date_str() for entry in recent_entries][::-1]
    # Display the weight homepage.
    return render_template('weight_view_weight.html', add_weight_form=AddWeightForm(), weight_data=all_weight_data,chart_data=chart_data,label_data=label_data)
def upload_binified_data(binified_data, error_handler, survey_id_dict):
    """ Takes in binified csv data and handles uploading/downloading+updating
    older data to/from S3 for each chunk.
    Returns a set of concatenations that have succeeded and can be removed.
    Returns the number of failed FTPS so that we don't retry them.
    Raises any errors on the passed in ErrorHandler.

    NOTE(review): this function uses Python-2-only constructs
    (dict.iteritems, Exception.message, str.encode("zip")) — it will not
    run unmodified on Python 3; confirm the target interpreter. """
    failed_ftps = set([])
    ftps_to_retire = set([])
    upload_these = []
    # Python 2 only: .iteritems() does not exist on Python 3 dicts.
    for data_bin, (data_rows_deque, ftp_deque) in binified_data.iteritems():
        # print 3
        with error_handler:
            try:
                # print 4
                study_id, user_id, data_type, time_bin, original_header = data_bin
                # print 5
                # data_rows_deque may be a generator; here it is evaluated
                rows = list(data_rows_deque)
                updated_header = convert_unix_to_human_readable_timestamps(original_header, rows)
                # print 6
                chunk_path = construct_s3_chunk_path(study_id, user_id, data_type, time_bin)
                # print 7
                old_chunk_exists = ChunkRegistry.objects.filter(chunk_path=chunk_path).exists()
                if old_chunk_exists:
                    # An older chunk covers this path: merge the new rows
                    # into the existing S3 file instead of creating one.
                    chunk = ChunkRegistry.objects.get(chunk_path=chunk_path)
                    try:
                        # print 8
                        # print chunk_path
                        s3_file_data = s3_retrieve(chunk_path, study_id, raw_path=True)
                        # print "finished s3 retrieve"
                    except S3ResponseError as e:
                        # print 9
                        # The following check is correct for boto version 2.38.0
                        # NOTE(review): e.message is Python 2 / old-boto only.
                        if "The specified key does not exist." == e.message:
                            # This error can only occur if the processing gets actually interrupted and
                            # data files fail to upload after DB entries are created.
                            # Encountered this condition 11pm feb 7 2016, cause unknown, there was
                            # no python stacktrace.  Best guess is mongo blew up.
                            # If this happened, delete the ChunkRegistry and push this file upload to the next cycle
                            chunk.remove()
                            raise ChunkFailedToExist("chunk %s does not actually point to a file, deleting DB entry, should run correctly on next index." % chunk_path)
                        raise  # Raise original error if not 404 s3 error
                    # print 10
                    old_header, old_rows = csv_to_list(s3_file_data)
                    if old_header != updated_header:
                        # To handle the case where a file was on an hour boundary and placed in
                        # two separate chunks we need to raise an error in order to retire this file. If this
                        # happens AND ONE of the files DOES NOT have a header mismatch this may (
                        # will?) cause data duplication in the chunked file whenever the file
                        # processing occurs run.
                        raise HeaderMismatchException('%s\nvs.\n%s\nin\n%s' %
                                                      (old_header, updated_header, chunk_path) )
                    # print 11
                    old_rows = [_ for _ in old_rows]
                    # print "11a"
                    # This is O(1), which is why we use a deque (double-ended queue)
                    old_rows.extend(rows)
                    # print "11b"
                    del rows
                    # print 12
                    ensure_sorted_by_timestamp(old_rows)
                    # print 13
                    if data_type == SURVEY_TIMINGS:
                        # print "13a"
                        new_contents = construct_utf_safe_csv_string(updated_header, old_rows)
                    else:
                        # print "13b"
                        new_contents = construct_csv_string(updated_header, old_rows)
                    del old_rows
                    # print 14
                    # NOTE(review): .encode("zip") is a Python 2 codec alias
                    # (zlib); it raises LookupError on Python 3.
                    upload_these.append((chunk, chunk_path, new_contents.encode("zip"), study_id))
                    del new_contents
                else:
                    # No existing chunk: build a brand-new chunked file and
                    # the parameters for its ChunkRegistry entry.
                    # print "7a"
                    ensure_sorted_by_timestamp(rows)
                    # print "7b"
                    if data_type == SURVEY_TIMINGS:
                        # print "7ba"
                        new_contents = construct_utf_safe_csv_string(updated_header, rows)
                    else:
                        # print "7bc"
                        new_contents = construct_csv_string(updated_header, rows)
                    # print "7c"
                    if data_type in SURVEY_DATA_FILES:
                        # We need to keep a mapping of files to survey ids, that is handled here.
                        # print "7da"
                        survey_id_hash = study_id, user_id, data_type, original_header
                        survey_id = survey_id_dict[survey_id_hash]
                        # print survey_id_hash
                    else:
                        # print "7db"
                        survey_id = None
                    # print "7e"
                    chunk_params = {
                        "study_id": study_id,
                        "user_id": user_id,
                        "data_type": data_type,
                        "chunk_path": chunk_path,
                        "time_bin": time_bin,
                        "survey_id": survey_id
                    }
                    upload_these.append((chunk_params, chunk_path, new_contents.encode("zip"), study_id))
            except Exception as e:
                # Here we catch any exceptions that may have arisen, as well as the ones that we raised
                # ourselves (e.g. HeaderMismatchException). Whichever FTP we were processing when the
                # exception was raised gets added to the set of failed FTPs.
                failed_ftps.update(ftp_deque)
                print(e)
                print("failed to update: study_id:%s, user_id:%s, data_type:%s, time_bin:%s, header:%s "
                      % (study_id, user_id, data_type, time_bin, updated_header))
                raise
            else:
                # If no exception was raised, the FTP has completed processing. Add it to the set of
                # retireable (i.e. completed) FTPs.
                ftps_to_retire.update(ftp_deque)
    # Fan the uploads out over a thread pool; any error from a worker is
    # re-raised here after its traceback is printed.
    pool = ThreadPool(CONCURRENT_NETWORK_OPS)
    errors = pool.map(batch_upload, upload_these, chunksize=1)
    for err_ret in errors:
        if err_ret['exception']:
            print(err_ret['traceback'])
            raise err_ret['exception']
    pool.close()
    pool.terminate()
    # The things in ftps to retire that are not in failed ftps.
    # len(failed_ftps) will become the number of files to skip in the next iteration.
    return ftps_to_retire.difference(failed_ftps), len(failed_ftps)
from enum import Enum
def system_get_enum_values(enum):
    """Gets all values from a System.Enum instance.

    Parameters
    ----------
    enum: System.Enum
        A Enum instance.

    Returns
    -------
    list
        A list containing the values of the Enum instance
    """
    # NOTE(review): `Enum.GetValues` is the .NET System.Enum API (pythonnet);
    # Python's stdlib enum.Enum (imported above) has no GetValues attribute,
    # so this raises AttributeError unless `Enum` is rebound to the CLR type
    # elsewhere — confirm the intended import.
    return list(Enum.GetValues(enum))
def skip_leading_ws_with_indent(s, i, tab_width):
    """Skips leading whitespace and returns (i, indent),
    - i points after the whitespace
    - indent is the width of the whitespace, assuming tab_width wide tabs."""
    width = 0
    length = len(s)
    tab = abs(tab_width)
    while i < length:
        ch = s[i]
        if ch == ' ':
            width += 1
        elif ch == '\t':
            # advance to the next tab stop
            width += tab - (width % tab)
        else:
            break
        i += 1
    return i, width
import sys
import inspect
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to Python object
    """
    if domain != 'py':
        return None
    module = sys.modules.get(info['module'])
    if module is None:
        return None
    obj = module
    for attr in info['fullname'].split('.'):
        try:
            obj = getattr(obj, attr)
        except Exception:
            return None
    # strip decorators, which would resolve to the source of the decorator
    # possibly an upstream bug in getsourcefile, bpo-1764286
    unwrap = getattr(inspect, 'unwrap', None)
    if unwrap is not None:
        obj = unwrap(obj)
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except Exception:
        lineno = None
    linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1) if lineno else ""
    fn = relpath(fn, start='')
    return "https://github.com/cadop/seg1d/blob/master/seg1d/%s%s" % (
        fn, linespec)
import sh
def get_minibam_bed(bamfile, bedfile, minibam=None):
    """ samtools view -L could do the work, but it is NOT random access. Here we
    are processing multiple regions sequentially. See also:

    https://www.biostars.org/p/49306/

    :param bamfile: input BAM file path
    :param bedfile: BED file listing the regions to extract
    :param minibam: optional output BAM path; derived from bamfile and the
        bedfile prefix when not given
    :return: path of the sorted, indexed mini-BAM
    """
    pf = op.basename(bedfile).split(".")[0]
    minibamfile = minibam or op.basename(bamfile).replace(".bam", ".{}.bam".format(pf))
    # Derive the SAM name from the resolved output path. The original used
    # `minibam.replace(...)`, which raised AttributeError whenever the
    # default minibam=None was in effect.
    minisamfile = minibamfile.replace(".bam", ".sam")
    baifile = minibamfile + ".bai"
    if op.exists(baifile):
        sh("rm {}".format(baifile))
    # Seed the SAM with the header, then append each region's records.
    cmd = "samtools view -H {} > {}".format(bamfile, minisamfile)
    sh(cmd)
    cmd = "cat {}".format(bedfile)
    cmd += " | perl -lane 'print \"$F[0]:$F[1]-$F[2]\"'"
    cmd += " | xargs -n1 -t -I \{\}"
    cmd += " samtools view {}".format(bamfile)
    cmd += " \{\} >> " + minisamfile
    sh(cmd)
    # Convert to BAM, sort, and index the result.
    cmd = "samtools view {} -b".format(minisamfile)
    cmd += " | samtools sort -"
    cmd += " -o {0}".format(minibamfile)
    sh(cmd)
    sh("samtools index {0}".format(minibamfile))
    return minibamfile
def create_app(config_object="tigerhacks_api.settings"):
    """Create application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.

    :param config_object: The configuration object to use.
    :return: The fully configured Flask application.
    """
    app = Flask(__name__.split(".")[0])
    logger.info("Flask app initialized")
    app.config.from_object(config_object)
    logger.info("Config loaded")
    app.dbconn = init_database_connection(app)
    logger.info("Database connection successful")
    register_extensions(app)
    register_blueprints(app)
    register_shellcontext(app)
    register_commands(app)
    configure_logger(app)
    logger.info("Extensions loaded")
    configure_api_key(app)
    configure_admin_key(app)
    logger.info("API keys configured")
    logger.info("Request logs will now take over.")
    # CORS mutates the app in place; the original bound the return value to
    # an unused local, which has been dropped.
    CORS(app)
    return app
def dest_in_spiral(data):
    """
    The map of the circuit consists of square cells. The first element in the
    center is marked as 1, and continuing in a clockwise spiral, the other
    elements are marked in ascending order ad infinitum. On the map, you can
    move (connect cells) vertically and horizontally. For example, the distance
    between cells 1 and 9 is two moves and the distance between 24 and 9 is one
    move.

    Input: A list of two marks of cells (integers).
    Output: The Manhattan distance between the two cells (integer).

    Method: for each number, find the smallest square number at least as
    large; its root gives the "ring" the number sits on. Walk back from
    that square's corner to get ring-local (x, y) coordinates, then shift
    the smaller ring's coordinates into the larger ring's frame.
    """
    a, b = max(data), min(data)
    # Smallest integer r with r**2 >= x.
    nearestSquare = lambda x: int(x**0.5) if (float(int(x**0.5)) == x**0.5) else 1 + int(x**0.5)
    NRA = nearestSquare(a)  # ring root of a
    NSA = NRA**2            # nearest square of a
    NRB = nearestSquare(b)
    NSB = NRB**2
    stepsfromNSA = NSA - a
    # Odd rings end in the bottom-left region, even rings in the top-right;
    # walking `stepsfromNSA` cells back from the square's corner gives the
    # ring-local coordinates of a.
    if NRA % 2 != 0:
        if stepsfromNSA > (NRA - 1):
            aY = 0
            aX = stepsfromNSA - (NRA - 1)
        else:
            aX = 0
            aY = (NRA - 1) - stepsfromNSA
    else:
        if stepsfromNSA > (NRA - 1):
            aY = NRA - 1
            aX = (NRA - 1) - (stepsfromNSA - (NRA - 1))
        else:
            aX = NRA - 1
            aY = stepsfromNSA
    # Integer division is essential: the original `/ 2` (true division on
    # Python 3) produced a fractional offset whenever the two ring sizes had
    # different parity, shifting b by half a cell and corrupting the result.
    offset = (NRA - NRB) // 2
    if NRB % 2 == 0 and NRB % 2 != NRA % 2:
        offset += 1
    stepsfromNSB = NSB - b
    if NRB % 2 != 0:
        if stepsfromNSB > (NRB - 1):
            bY = 0
            bX = stepsfromNSB - (NRB - 1)
        else:
            bX = 0
            bY = (NRB - 1) - stepsfromNSB
    else:
        if stepsfromNSB > (NRB - 1):
            bY = NRB - 1
            bX = (NRB - 1) - (stepsfromNSB - (NRB - 1))
        else:
            bX = NRB - 1
            bY = stepsfromNSB
    # Translate b into a's ring frame, then take the Manhattan distance.
    bX, bY = bX + offset, bY + offset
    return abs(aX - bX) + abs(aY - bY)
from datetime import datetime
def get_current_time():
    """ returns current time w.r.t. the timezone configured on the server

    Returns
    -------
    : str
        time string of now()
    """
    server = get_server()
    # Fall back to UTC when the server has no explicit timezone.
    tz = 'UTC' if server.time_zone is None else server.time_zone
    return utc_to_localtime(datetime.now(), tz)
import async_timeout
import aiohttp
import asyncio
async def _update_google_domains(hass, session, domain, user, password, timeout):
    """Update Google Domains.

    Returns True when the dynamic-DNS update succeeded (a 'good' or 'nochg'
    response), False on API errors, connection failure, or timeout.
    """
    url = f"https://{user}:{password}@domains.google.com/nic/update"
    params = {"hostname": domain}
    try:
        async with async_timeout.timeout(timeout):
            resp = await session.get(url, params=params)
            body = await resp.text()
            # 'good' means the record was updated, 'nochg' that it was
            # already current; startswith accepts both via a tuple argument.
            if body.startswith(("good", "nochg")):
                return True
            _LOGGER.warning("Updating Google Domains failed: %s => %s", domain, body)
    except aiohttp.ClientError:
        _LOGGER.warning("Can't connect to Google Domains API")
    except asyncio.TimeoutError:
        _LOGGER.warning("Timeout from Google Domains API for domain: %s", domain)
    return False
def smoothen_over_time(lane_lines):
    """
    Smooth the lane line inference over a window of frames and return the
    averaged left and right lines.
    """
    frame_count = len(lane_lines)
    left_coords = np.zeros((frame_count, 4))
    right_coords = np.zeros((frame_count, 4))
    for idx, frame_lines in enumerate(lane_lines):
        left_coords[idx] += frame_lines[0].get_coords()
        right_coords[idx] += frame_lines[1].get_coords()
    return (Line(*np.mean(left_coords, axis=0)),
            Line(*np.mean(right_coords, axis=0)))
from typing import List
from typing import Optional
import random
def select_random(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]:
    """
    Return a random node, or None when the list is empty.
    """
    # The Optional return type implies an empty list is acceptable input;
    # guard it instead of letting random.choice raise IndexError.
    if not nodes:
        return None
    return random.choice(nodes)
def convert(from_path, ingestor, to_path, egestor, select_only_known_labels, filter_images_without_labels):
    """
    Converts between data formats, validating that the converted data matches
    `IMAGE_DETECTION_SCHEMA` along the way.

    :param from_path: '/path/to/read/from'
    :param ingestor: `Ingestor` to read in data
    :param to_path: '/path/to/write/to'
    :param egestor: `Egestor` to write out data
    :return: (success, message)
    """
    is_valid, message = ingestor.validate(from_path)
    if not is_valid:
        return is_valid, message
    detections = ingestor.ingest(from_path)
    validate_image_detections(detections)
    detections = convert_labels(
        image_detections=detections,
        expected_labels=egestor.expected_labels(),
        select_only_known_labels=select_only_known_labels,
        filter_images_without_labels=filter_images_without_labels)
    egestor.egest(image_detections=detections, root=to_path)
    return True, ''
import timeit
def timer(method):
    """
    Method decorator to capture and print total run time in seconds

    :param method: The method or function to time
    :return: A function
    """
    @wraps(method)
    def inner(*args, **kw):
        started = timeit.default_timer()
        outcome = method(*args, **kw)
        finished = timeit.default_timer()
        print('%r %2.2f s' %
              (method.__name__, round((finished - started), 2)))
        return outcome
    return inner
def macro_states(macro_df, style, roll_window):
    """
    Convert macro factors into discrete state codes.

    Args:
        macro_df (pd.DataFrame): contains macro factors data
        style (str): method used to classify. Accepted values: 'naive'
        roll_window (int): rolling window in months

    Returns:
        state_df (pd.DataFrame): macro factors classified to states:
            1 (True) where the value is at/above its rolling median,
            -1 where below, and 0 where the value was missing. The
            warm-up rows (undefined rolling median) are dropped.

    Raises:
        ValueError: if ``style`` is not a recognised method.
    """
    if style != 'naive':
        # The original fell through and raised NameError on `state_df`
        # for any unknown style; fail fast with a clear message instead.
        raise ValueError(f"Unknown classification style: {style!r}")
    # Classify on the basis of a rolling median.
    roll_median = macro_df.rolling(roll_window).median()
    state_df = macro_df >= roll_median
    # Drop warm-up rows where the rolling median is undefined.
    state_df = state_df[pd.notnull(roll_median)].dropna(how='all')
    state_df.replace(0, -1, inplace=True)
    state_df.fillna(0, inplace=True)
    return state_df
import torch
def get_sparsity(lat):
    """Return percentage of nonzero slopes in lat.

    Args:
        lat (Lattice): instance of Lattice class
    """
    # Operators only needs a placeholder input to expose the L matrix.
    dummy_input = torch.tensor([[0., 0]])
    op = Operators(lat, dummy_input)
    # convert L to np.float64 (simplex requires this)
    regularization_mat = op.L_mat_sparse.astype(np.float64)
    coefficients = lat.flattened_C
    # compute ||Lz||_0 (entries below SPARSITY_EPS count as zero)
    slopes = regularization_mat.dot(coefficients.numpy())
    zero_idx = np.where(np.absolute(slopes) <= SPARSITY_EPS)[0]
    zero_fraction = 1.
    if slopes.shape[0] != 0:
        zero_fraction = zero_idx.shape[0] / slopes.shape[0]
    return 100. - zero_fraction * 100
def XYZ_to_Kim2009(
    XYZ: ArrayLike,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    media: MediaParameters_Kim2009 = MEDIA_PARAMETERS_KIM2009["CRT Displays"],
    surround: InductionFactors_Kim2009 = VIEWING_CONDITIONS_KIM2009["Average"],
    discount_illuminant: Boolean = False,
    n_c: Floating = 0.57,
) -> CAM_Specification_Kim2009:
    """
    Computes the *Kim, Weyrich and Kautz (2009)* colour appearance model
    correlates from given *CIE XYZ* tristimulus values.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken
        to be 20% of the luminance of a white object in the scene).
    media
        Media parameters.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.
    n_c
        Cone response sigmoidal curve modulating factor :math:`n_c`.

    Returns
    -------
    :class:`colour.CAM_Specification_Kim2009`
        *Kim, Weyrich and Kautz (2009)* colour appearance model specification.

    Notes
    -----
    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``XYZ``    | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+
    | ``XYZ_w``  | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+

    +---------------------------------+-----------------------+---------------+
    | **Range**                       | **Scale - Reference** | **Scale - 1** |
    +=================================+=======================+===============+
    | ``CAM_Specification_Kim2009.J`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.C`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.h`` | [0, 360]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.s`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.Q`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.M`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.H`` | [0, 400]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Kim2009`

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> media = MEDIA_PARAMETERS_KIM2009['CRT Displays']
    >>> surround = VIEWING_CONDITIONS_KIM2009['Average']
    >>> XYZ_to_Kim2009(XYZ, XYZ_w, L_A, media, surround)
    ... # doctest: +ELLIPSIS
    CAM_Specification_Kim2009(J=28.8619089..., C=0.5592455..., \
h=219.0480667..., s=9.3837797..., Q=52.7138883..., M=0.4641738..., \
H=278.0602824..., HC=None)
    """
    XYZ = to_domain_100(XYZ)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)
    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB = vector_dot(CAT_CAT02, XYZ)
    RGB_w = vector_dot(CAT_CAT02, XYZ_w)
    # Computing degree of adaptation :math:`D`; discounting the illuminant
    # forces full adaptation (D = 1).
    D = (
        degree_of_adaptation(surround.F, L_A)
        if not discount_illuminant
        else ones(L_A.shape)
    )
    # Computing full chromatic adaptation.
    XYZ_c = full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)
    XYZ_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)
    # Converting to *Hunt-Pointer-Estevez* colourspace.
    LMS = RGB_to_rgb(XYZ_c)
    LMS_w = RGB_to_rgb(XYZ_wc)
    # Cones absolute response.
    LMS_n_c = spow(LMS, n_c)
    LMS_w_n_c = spow(LMS_w, n_c)
    L_A_n_c = spow(L_A, n_c)
    LMS_p = LMS_n_c / (LMS_n_c + L_A_n_c)
    LMS_wp = LMS_w_n_c / (LMS_w_n_c + L_A_n_c)
    # Achromatic signal :math:`A` and :math:`A_w`; the 40/20/1 cone weights
    # sum to 61, hence the normalisation below.
    v_A = np.array([40, 20, 1])
    A = np.sum(v_A * LMS_p, axis=-1) / 61
    A_w = np.sum(v_A * LMS_wp, axis=-1) / 61
    # Perceived *Lightness* :math:`J_p`.
    a_j, b_j, o_j, n_j = 0.89, 0.24, 0.65, 3.65
    A_A_w = A / A_w
    J_p = spow(
        (-(A_A_w - b_j) * spow(o_j, n_j)) / (A_A_w - b_j - a_j), 1 / n_j
    )
    # Computing the media dependent *Lightness* :math:`J`.
    J = 100 * (media.E * (J_p - 1) + 1)
    # Computing the correlate of *brightness* :math:`Q`.
    n_q = 0.1308
    Q = J * spow(Y_w, n_q)
    # Opponent signals :math:`a` and :math:`b`.
    a = (1 / 11) * np.sum(np.array([11, -12, 1]) * LMS_p, axis=-1)
    b = (1 / 9) * np.sum(np.array([1, 1, -2]) * LMS_p, axis=-1)
    # Computing the correlate of *chroma* :math:`C`.
    a_k, n_k = 456.5, 0.62
    C = a_k * spow(np.sqrt(a ** 2 + b ** 2), n_k)
    # Computing the correlate of *colourfulness* :math:`M`.
    a_m, b_m = 0.11, 0.61
    M = C * (a_m * np.log10(Y_w) + b_m)
    # Computing the correlate of *saturation* :math:`s`.
    s = 100 * np.sqrt(M / Q)
    # Computing the *hue* angle :math:`h`; the modulo maps the atan2 output
    # into [0, 360).
    h = np.degrees(np.arctan2(b, a)) % 360
    # Computing hue :math:`h` quadrature :math:`H`.
    H = hue_quadrature(h)
    return CAM_Specification_Kim2009(
        as_float(from_range_100(J)),
        as_float(from_range_100(C)),
        as_float(from_range_degrees(h)),
        as_float(from_range_100(s)),
        as_float(from_range_100(Q)),
        as_float(from_range_100(M)),
        as_float(from_range_degrees(H, 400)),
        None,
    )
from typing import Union
from pathlib import Path
from typing import Optional
def load_capsule(path: Union[str, Path],
                 source_path: Optional[Path] = None,
                 key: Optional[str] = None,
                 inference_mode: bool = True) -> BaseCapsule:
    """Read a capsule file from disk and hand it to the bytes loader.

    :param path: The path to the capsule file
    :param source_path: The path to the capsule's source code, if it's
        available at runtime
    :param key: The AES key to decrypt the capsule with, or None if the capsule
        is not encrypted
    :param inference_mode: If True, the backends for this capsule will be
        started. If False, the capsule will never be able to run inference, but
        it will still have its various readable attributes.
    """
    capsule_path = Path(path)

    if source_path is None:
        # Default: a sibling directory named after the capsule file with its
        # suffix removed (e.g. "foo.cap" -> "foo/").
        source_path = capsule_path.absolute().with_suffix("")

    capsule_bytes = capsule_path.read_bytes()
    return load_capsule_from_bytes(
        data=capsule_bytes,
        source_path=source_path,
        key=key,
        inference_mode=inference_mode,
    )
def geodetic2ecef(lat, lon, alt):
    """Convert geodetic (lat/lon in degrees, alt in meters) to ECEF x, y, z."""
    lat_rad = radians(lat)
    lon_rad = radians(lon)
    xi = sqrt(1 - esq * sin(lat_rad))
    # Prime-vertical radius of curvature at this latitude.
    n = a / xi
    cos_lat = cos(lat_rad)
    x = (n + alt) * cos_lat * cos(lon_rad)
    y = (n + alt) * cos_lat * sin(lon_rad)
    z = (n * (1 - esq) + alt) * sin(lat_rad)
    return x, y, z
def processor_group_size(nprocs, number_of_tasks):
    """
    Find the number of groups to divide `nprocs` processors into to tackle `number_of_tasks` tasks.

    When `number_of_tasks` > `nprocs` the smallest integer multiple of `nprocs` that
    equals or exceeds `number_of_tasks` is returned.
    When `number_of_tasks` < `nprocs` the smallest divisor of `nprocs` that equals or exceeds
    `number_of_tasks` is returned.

    Parameters
    ----------
    nprocs : int
        The number of processors to divide into groups.

    number_of_tasks : int or float
        The number of tasks to perform, which can also be seen as the *desired* number of
        processor groups.  If a floating point value is given the next highest integer is
        used.

    Returns
    -------
    int
    """
    if number_of_tasks >= nprocs:
        # Round number_of_tasks up to the nearest integer multiple of nprocs.
        return nprocs * int(_np.ceil(1. * number_of_tasks / nprocs))
    else:
        fctrs = sorted(_prime_factors(nprocs))
        ntasks = int(_np.ceil(number_of_tasks))
        if ntasks in fctrs:
            return ntasks  # we got lucky: a single prime factor matches exactly
        # Accumulate prime factors (smallest first) until their product
        # covers the requested task count; that product divides nprocs.
        # NOTE: np.prod replaces np.product, which was deprecated and then
        # removed in NumPy 2.0.
        i = 1
        while _np.prod(fctrs[0:i]) < number_of_tasks:
            i += 1
        return _np.prod(fctrs[0:i])
def skin_base_url(skin, variables):
    """ Returns the skin_base_url associated to the skin.
    """
    # Walk variables['skins'][skin]['base_url'], defaulting to '' at any
    # missing level.
    skins = variables.get('skins', {})
    skin_config = skins.get(skin, {})
    return skin_config.get('base_url', '')
from typing import Union
import torch
import os
import warnings
def load(
    name: str,
    device: Union[str, torch.device] = 'cuda' if torch.cuda.is_available() else 'cpu',
    jit: bool = False,
    download_root: str = None,
):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).
    download_root: str
        path to download the model files; by default, it uses '~/.cache/clip'
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    Raises
    ------
    RuntimeError
        If `name` is neither a known model name nor an existing file path.
    """
    # Resolve the checkpoint location: known model names are downloaded
    # (resumable) into the cache dir; anything else must be a local file.
    if name in _MODELS:
        model_path = _download(
            _S3_BUCKET + _MODELS[name],
            download_root or os.path.expanduser('~/.cache/clip'),
            with_resume=True,
        )
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(
            f'Model {name} not found; available models = {available_models()}'
        )
    try:
        # loading JIT archive; map to CPU first when the caller wants the
        # non-JIT model so the graph can be rebuilt on the target device
        model = torch.jit.load(model_path, map_location=device if jit else 'cpu').eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict (the file was not a JIT archive)
        if jit:
            warnings.warn(
                f'File {model_path} is not a JIT archive. Loading as a state dict instead'
            )
            jit = False
        state_dict = torch.load(model_path, map_location='cpu')
    if not jit:
        # Rebuild a plain (hackable) nn.Module from the weights.
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == 'cpu':
            # CLIP weights are fp16; fp16 ops are not supported on CPU.
            model.float()
        return (
            model,
            _transform_ndarray(model.visual.input_resolution),
        )
    # patch the device names
    # The JIT graph has the original training device baked into its constant
    # nodes; trace a trivial graph on the target device to get a replacement
    # constant, then rewrite every 'cuda' constant in the model's graphs.
    device_holder = torch.jit.trace(
        lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
    )
    device_node = [
        n
        for n in device_holder.graph.findAllNodes('prim::Constant')
        if 'Device' in repr(n)
    ][-1]
    def patch_device(module):
        # Collect this module's graph(s); some traced modules expose an
        # additional `forward1` variant that must be patched too.
        try:
            graphs = [module.graph] if hasattr(module, 'graph') else []
        except RuntimeError:
            graphs = []
        if hasattr(module, 'forward1'):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes('prim::Constant'):
                if 'value' in node.attributeNames() and str(node['value']).startswith(
                    'cuda'
                ):
                    node.copyAttributes(device_node)
    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    # patch dtype to float32 on CPU
    if str(device) == 'cpu':
        # Same trick as above: trace a float() call to obtain a dtype
        # constant node, then overwrite the fp16 (value == 5) dtype args.
        float_holder = torch.jit.trace(
            lambda: torch.ones([]).float(), example_inputs=[]
        )
        float_input = list(float_holder.graph.findNode('aten::to').inputs())[1]
        float_node = float_input.node()
        def patch_float(module):
            try:
                graphs = [module.graph] if hasattr(module, 'graph') else []
            except RuntimeError:
                graphs = []
            if hasattr(module, 'forward1'):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes('aten::to'):
                    inputs = list(node.inputs())
                    for i in [
                        1,
                        2,
                    ]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()['value'] == 5:
                            inputs[i].node().copyAttributes(float_node)
        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    return (
        model,
        _transform_ndarray(model.input_resolution.item()),
    )
import os
def default_pre_training_callbacks(
        logger=default_logger,
        with_lr_finder=False,
        with_export_augmentations=True,
        with_reporting_server=True,
        with_profiler=False,
        additional_callbacks=None):
    """
    Build the default list of callbacks to run before the model is fitted.

    Optional callbacks are toggled by the ``with_*`` flags; any
    ``additional_callbacks`` are appended at the end, after the defaults.
    """
    all_callbacks = []

    if with_reporting_server:
        all_callbacks.append(callback_reporting_start_server.CallbackReportingStartServer())

    # Always-on reporting callbacks.
    sources_root = os.path.join(os.path.dirname(__file__), '..', '..')
    all_callbacks.extend([
        callback_zip_sources.CallbackZipSources(folders_to_record=sources_root),
        callback_reporting_model_summary.CallbackReportingModelSummary(),
        callback_reporting_dataset_summary.CallbackReportingDatasetSummary(),
        callback_reporting_export_samples.CallbackReportingExportSamples(table_name='random_samples'),
    ])

    if with_profiler:
        all_callbacks.append(callback_profiler.CallbackProfiler())

    if with_export_augmentations:
        all_callbacks.append(callback_reporting_augmentations.CallbackReportingAugmentations())

    if with_lr_finder:
        # this may take some time, hence the reason it is disabled by default
        all_callbacks.append(callback_learning_rate_finder.CallbackLearningRateFinder())

    if additional_callbacks is not None:
        all_callbacks.extend(additional_callbacks)

    return all_callbacks
import json
def validate_dumpling(dumpling_json):
    """
    Validates a dumpling received from (or about to be sent to) the dumpling
    hub. Validation involves ensuring that it's valid JSON and that it includes
    a ``metadata.chef`` key.

    :param dumpling_json: The dumpling JSON.
    :raise: :class:`netdumplings.exceptions.InvalidDumpling` if the
        dumpling is invalid.
    :return: A dict created from the dumpling JSON.
    """
    try:
        dumpling = json.loads(dumpling_json)
    except json.JSONDecodeError as e:
        # Chain the decode error so the original parse failure stays visible.
        raise InvalidDumpling("Could not interpret dumpling JSON") from e

    try:
        # EAFP check: TypeError covers non-dict payloads or metadata values.
        dumpling['metadata']['chef']
    except (KeyError, TypeError) as e:
        raise InvalidDumpling("Could not determine chef name") from e

    return dumpling
def check_gradient(func, atol=1e-8, rtol=1e-5, quiet=False):
    """ Test gradient function with a set of MC photons.

        This works with either LCPrimitive or LCTemplate objects.

        :param func: object providing random/gradient/approx_gradient
        :param atol: absolute tolerance for the analytic/numeric comparison
        :param rtol: relative tolerance (scaled by the numeric gradient)
        :param quiet: suppress the per-parameter pass/fail printout
        :return: True if every parameter's gradient matched within tolerance

        TODO -- there is trouble with the numerical gradient when
        a for the location-related parameters when the finite step
        causes the peak to shift from one side of an evaluation phase
        to the other."""
    en = np.random.rand(1000)*2 + 1  # 100 MeV to 10 GeV
    ph = func.random(en)
    if hasattr(func, 'closest_to_peak'):
        eps = min(1e-6, 0.2*func.closest_to_peak(ph))
    else:
        eps = 1e-6
    g1 = func.gradient(ph, en, free=False)
    g2 = func.approx_gradient(ph, en, eps=eps)
    anyfail = False
    for i in range(g1.shape[0]):
        d1 = np.abs(g1[i]-g2[i])
        # BUGFIX: the tolerance must be scaled by THIS parameter's numeric
        # gradient row (g2[i]); the previous rtol*np.abs(g2) broadcast the
        # 1-D difference against the full (nparam, nphoton) matrix, testing
        # each parameter against every other parameter's scale.
        fail = np.any(d1 > (atol + rtol*np.abs(g2[i])))
        if not quiet:
            pass_string = 'FAILED' if fail else 'passed'
            print('%02d (%s) %.3g (abs)' % (i, pass_string, d1.max()))
        anyfail = anyfail or fail
    return not anyfail
def _recurse_to_best_estimate(
    lower_bound, upper_bound, num_entities, sample_sizes
):
    """Finds the best estimate of population size by bisecting
    [lower_bound, upper_bound] until the interval collapses.

    Parameters
    ----------
    lower_bound: int
        The lower bound of the interval to be tested; the value of the error
        function can always be assumed to be positive at this point.
    upper_bound: int
        The upper bound of the interval to be tested; the value of the error
        function can always be assumed to be negative at this point.
    num_entities: int
        The number of distinct entities observed.
    sample_sizes: list
        A list of integers indicating the size of each sample taken.

    Returns
    -------
    int
        The best estimate of population size.
    """
    # Iterative bisection: keep the sign invariant (error > 0 at the lower
    # bound, <= 0 at the upper bound) and shrink until the bounds are
    # adjacent, then the upper bound is the answer.
    while upper_bound - lower_bound > 1:
        midpoint = int(np.ceil((lower_bound + upper_bound) / 2))
        error_at_midpoint = _calculate_error(midpoint, num_entities, sample_sizes)
        if error_at_midpoint > 0:
            lower_bound = midpoint
        else:
            upper_bound = midpoint
    return upper_bound
def betwix(iterable, start=None, stop=None, inc=False):
    """ Extract selected elements from an iterable. But unlike `islice`,
    extract based on the element's value instead of its position.

    Args:
        iterable (iter): The initial sequence
        start (str): The fragment to begin with (inclusive)
        stop (str): The fragment to finish at (exclusive)
        inc (bool): Make stop operate inclusively (useful if reading a file and
            the start and stop fragments are on the same line)

    Returns:
        Iter: New dict with specified keys removed

    Examples:
        >>> from io import StringIO
        >>>
        >>> list(betwix('ABCDEFG', stop='C')) == ['A', 'B']
        True
        >>> list(betwix('ABCDEFG', 'C', 'E')) == ['C', 'D']
        True
        >>> list(betwix('ABCDEFG', 'C')) == ['C', 'D', 'E', 'F', 'G']
        True
        >>> f = StringIO('alpha\\n<beta>\\ngamma\\n')
        >>> list(betwix(f, '<', '>', True)) == ['<beta>\\n']
        True
        >>> list(betwix('ABCDEFG', 'C', 'E', True)) == ['C', 'D', 'E']
        True
    """
    def inc_takewhile(predicate, _iter):
        # Like itertools.takewhile, but also yields the first failing element
        # (so `stop` becomes inclusive).
        for x in _iter:
            yield x
            if not predicate(x):
                break

    def not_containing(sentinel):
        # Predicate factory: True while `sentinel` has not appeared in the
        # element. (Named functions replace the previous lambda-assignment.)
        def pred(x):
            return sentinel not in x
        return pred

    first = it.dropwhile(not_containing(start), iterable) if start else iterable

    if stop and inc:
        last = inc_takewhile(not_containing(stop), first)
    elif stop:
        last = it.takewhile(not_containing(stop), first)
    else:
        last = first

    return last
import logging
def map_configuration(config: dict) -> tp.List[MeterReaderNode]: # noqa MC0001
"""
Parsed configuration
:param config: dict from
:return:
"""
# pylint: disable=too-many-locals, too-many-nested-blocks
meter_reader_nodes = []
if 'devices' in config and 'middleware' in config:
try:
if config.get('middleware').get('type') == 'volkszaehler':
gateway = VolkszaehlerGateway(config.get('middleware').get('middleware_url'),
config.get('middleware').get('interpolate', True))
else:
logging.error(f'Middleware "{config.get("middleware").get("type")}" not supported!')
gateway = None
if gateway:
for device in config.get('devices').values():
meter_id = strip(str(device.pop('id')))
protocol = strip(device.pop('protocol'))
channels = device.pop('channels')
if protocol == 'SML':
reader = SmlReader(meter_id, **device)
elif protocol == 'PLAIN':
reader = PlainReader(meter_id, **device)
elif protocol == 'BME280':
reader = Bme280Reader(meter_id, **device)
else:
logging.error(f'Unsupported protocol {protocol}')
reader = None
sample = reader.poll()
if sample is not None:
available_channels = {}
for variable in sample.channels:
obj_name = variable.get('objName', '')
for channel_name, channel in channels.items():
interval = humanfriendly_time_parser(channel.get('interval', '1h'))
uuid = channel.get('uuid')
factor = channel.get('factor', 1)
if strip(str(channel_name)) in strip(str(obj_name)):
# Replacing config string with exact match
available_channels[obj_name] = (uuid, interval, factor)
if available_channels:
meter_reader_node = MeterReaderNode(available_channels,
reader,
gateway)
# Perform first push to middleware
if meter_reader_node.poll_and_push(sample):
meter_reader_nodes.append(meter_reader_node)
else:
logging.error(f"Not registering node for meter id {reader.meter_id}.")
else:
logging.warning(f"Cannot register channels for meter {meter_id}.")
else:
logging.warning(f"Could not read meter id {meter_id} using protocol {protocol}.")
except KeyError as err:
logging.error(f"Error while processing configuration: {err}")
else:
logging.error("Config file is incomplete.")
return meter_reader_nodes | 0d9212850547f06583d71d8d9b7e2995bbf701d5 | 31,542 |
def places(client, query, location=None, radius=None, language=None,
           min_price=None, max_price=None, open_now=False, type=None, region=None,
           page_token=None):
    """
    Places text search: finds places matching a free-form query string.

    :param query: The text string on which to search, e.g. "restaurant".
    :type query: string
    :param location: The latitude/longitude to bias results towards.
    :type location: string, dict, list, or tuple
    :param radius: Distance in meters within which to bias results.
    :type radius: int
    :param language: The language in which to return results.
    :type language: string
    :param min_price: Only return places at or above this price level
        (0 = most affordable ... 4 = most expensive).
    :type min_price: int
    :param max_price: Only return places at or below this price level
        (0 = most affordable ... 4 = most expensive).
    :type max_price: int
    :param open_now: Return only places open for business at query time.
    :type open_now: bool
    :param type: Restrict results to places matching the specified type; see
        https://developers.google.com/places/supported_types
    :type type: string
    :param region: The region code, optional parameter.
        See more @ https://developers.google.com/places/web-service/search
    :type region: string
    :param page_token: Token from a previous search; returns the next page of
        results for that same search.
    :type page_token: string

    :rtype: result dict with the following keys:
        results: list of places
        html_attributions: set of attributions which must be displayed
        next_page_token: token for retrieving the next page of results
    """
    # Thin wrapper: delegate to the shared _places helper in "text" mode.
    return _places(
        client,
        "text",
        query=query,
        location=location,
        radius=radius,
        language=language,
        min_price=min_price,
        max_price=max_price,
        open_now=open_now,
        type=type,
        region=region,
        page_token=page_token,
    )
def load_data(_file, pct_split):
    """Load test and train data into a single labelled DataFrame.

    The CSV is read, then split row-wise: the first ``pct_split`` fraction
    of rows becomes the 'train' part and the remainder the 'test' part.
    (Previously the split was left as commented-out placeholder code and
    ``pct_split`` was ignored.)

    :param _file: path or file-like object accepted by ``pd.read_csv``
    :param pct_split: fraction of rows (0.0..1.0) assigned to 'train'
    :return: pd.DataFrame with a MultiIndex of ['train'/'test', row index]
    """
    # load train and test data
    data = pd.read_csv(_file)
    # split into train and test using pct_split (row-order split)
    split_at = int(len(data) * pct_split)
    data_train = data.iloc[:split_at]
    data_test = data.iloc[split_at:]
    # concat and label each partition with an outer 'train'/'test' key
    data_out = pd.concat([data_train, data_test], keys=['train', 'test'])
    return data_out
def sorted_items(d, key=None, reverse=False):
    """Given a dictionary `d` return items: (k1, v1), (k2, v2)... sorted in
    ascending order according to key.

    :param dict d: dictionary
    :param key: optional function remapping key
    :param bool reverse: If True return in descending order instead of default ascending
    """
    if d is None:
        return []

    def sort_key(item):
        # Sort by the (optionally remapped) dictionary key.
        k = item[0]
        return k if key is None else key(k)

    return sorted(d.items(), key=sort_key, reverse=reverse)
import sys
def factorize(eri_full, rank):
    """ Do single factorization of the ERI tensor

    Args:
       eri_full (np.ndarray) - 4D (N x N x N x N) full ERI tensor
       rank (int) - number of vectors to retain in ERI rank-reduction procedure

    Returns:
       eri_rr (np.ndarray) - 4D approximate ERI tensor reconstructed from LR vec
       LR (np.ndarray) - 3D (N x N x rank) tensor containing SF vectors
    """
    n_orb = eri_full.shape[0]
    assert n_orb**4 == len(eri_full.flatten())

    # Eigendecompose the (N^2 x N^2) matricized ERI tensor.
    L = eigendecomp(eri_full.reshape(n_orb**2, n_orb**2), tol=1e-16)

    # Keep all columns when rank is None, otherwise truncate.
    LR = L[:, :] if rank is None else L[:, :rank]

    # Reconstruct the (approximate) ERI tensor from the retained vectors.
    eri_rr = np.einsum('ik,kj->ij', LR, LR.T, optimize=True)
    eri_rr = eri_rr.reshape(n_orb, n_orb, n_orb, n_orb)

    LR = LR.reshape(n_orb, n_orb, -1)
    if rank is not None and LR.shape[2] != rank:
        sys.exit(
            "LR.shape: %s\nrank: %s\nLR.shape and rank are inconsistent"
            % (LR.shape, rank))
    return eri_rr, LR
def randint_population(shape, max_value, min_value=0):
    """Generate a random population of integer genes.

    Args:
        shape (set of ints): shape of the population, of the form
            (num_chromosomes, chromosome_dim_1, ..., chromosome_dim_n).
        max_value (int): Maximum value taken by a given gene (inclusive).
        min_value (int, optional): Minimum value a gene can take. Defaults to 0.

    Returns:
        Tensor: random population.
    """
    # B.randint treats `high` as exclusive, so shift by one to keep
    # max_value inclusive.
    return B.randint(low=min_value, high=max_value + 1, shape=shape,
                     dtype=B.intx())
def simplex_creation(
    mean_value: np.array, sigma_variation: np.array, rng: RandomNumberGenerator = None
) -> np.array:
    """
    Build the starting simplex for a Nelder-Mead-style optimization.

    One vertex sits at ``mean_value``; the remaining vertices are obtained by
    randomly perturbing, orthonormalizing, and rescaling one basis direction
    per control parameter.

    @return: (n_params + 1, n_params) array of simplex vertices
    """
    n_params = mean_value.shape[0]

    ##################
    # Scale matrix: one basis direction per control parameter, with the
    # first direction randomly tilted before orthonormalization.
    ##################
    directions = np.diag(np.ones_like(sigma_variation))

    # Random perturbation in [-sqrt(3), +sqrt(3)] added to the first row.
    if rng is None:
        rnd = np.random.rand(n_params)
    else:
        rnd = rng.get_random_numbers(n_params)
    rnd = rnd.reshape(
        n_params,
    )
    directions[0, :] += np.sqrt(3) * (rnd - 0.5) * 2

    # Orthonormalize with Gram-Schmidt, then rescale each direction by its
    # sigma variation.
    orthonormal = gram_schmidt(directions.T)
    rescaled = orthonormal @ np.diag(sigma_variation)

    # Prepend an all-zero row: the vertex that stays exactly at the mean.
    zero_row = np.zeros((1, n_params))
    vertices = np.append(zero_row, rescaled, axis=0)

    # Shift every vertex by the mean value.
    offset = np.outer(np.ones((1, n_params + 1)), mean_value)
    return vertices + offset
def _scale_db(out, data, mask, vmins, vmaxs, scale=1.0, offset=0.0):
    # pylint: disable=too-many-arguments
    """ decibel data scaling. """
    # dB = 10 * log10(x): shrink the limits by 10x and defer to log10 scaling.
    db_vmins = [v * 0.1 for v in vmins]
    db_vmaxs = [v * 0.1 for v in vmaxs]
    return _scale_log10(out, data, mask, db_vmins, db_vmaxs, scale, offset)
def make_tree(anime):
    """
    Creates anime tree: BFS downwards from the given anime, then walks
    upwards to the topmost parent, then BFS downwards again from that new
    root to pick up the rest of the franchise.

    :param anime: Anime
    :return: AnimeTree
    """
    tree = AnimeTree(anime)
    # queue for BFS
    queue = deque()
    root = tree.root
    queue.appendleft(root)
    # set for keeping track of visited anime
    # NOTE(review): `visited` starts with an Anime but later receives tree
    # nodes (add_child/add_parent return values); membership checks below
    # mix both kinds — confirm the two types are interchangeable here.
    visited = {anime}
    # BFS downwards
    # NOTE(review): the queue initially holds a tree node but children are
    # enqueued as Anime objects; `current.anime` is read on both, so both
    # presumably expose an `.anime` attribute — TODO confirm.
    while len(queue) > 0:
        current = queue.pop()
        related = current.anime.related
        for relation in related:
            if relation.lower() in CHILDREN:
                for item in related[relation]:
                    # Each related entry triggers a Jikan API fetch.
                    child = Anime(jikan.anime(item['mal_id']))
                    node = tree.add_child(child=child, parent=current)
                    visited.add(node)
                    queue.appendleft(child)
    parent_id = 0
    # Search for parent upwards
    while parent_id is not None:
        related = root.anime.related
        parent_id = None
        # First matching parent-type relation wins.
        for i in PARENT:
            if i in related:
                parent_id = related[i][0]['mal_id']
                break
        if parent_id is None:
            break
        parent = Anime(jikan.anime(parent_id))
        node = tree.add_parent(parent=parent, child=root)
        # The new parent becomes the tree root; keep climbing from it.
        root = node
        visited.add(root)
        queue.appendleft(parent)
    # BFS new root
    while len(queue) > 0:
        current = queue.pop()
        if current is None:
            continue
        related = current.anime.related
        for relation in related:
            if relation.lower() in CHILDREN:
                for item in related[relation]:
                    child = Anime(jikan.anime(item['mal_id']))
                    node = tree.add_child(child=child, parent=current)
                    # Skip subtrees already attached during the first pass.
                    if node in visited:
                        continue
                    visited.add(node)
                    queue.appendleft(child)
    return tree
def draw_bboxes(images,              # type: thelper.typedefs.InputType
                preds=None,          # type: Optional[thelper.typedefs.AnyPredictionType]
                bboxes=None,         # type: Optional[thelper.typedefs.AnyTargetType]
                color_map=None,      # type: Optional[thelper.typedefs.ClassColorMap]
                redraw=None,         # type: Optional[thelper.typedefs.DrawingType]
                block=False,         # type: Optional[bool]
                min_confidence=0.5,  # type: thelper.typedefs.Number
                class_map=None,      # type: Optional[thelper.typedefs.ClassIdType, AnyStr]
                **kwargs             # type: Any
                ):
    """Draws a set of bounding box prediction results on images.

    Args:
        images: images with first dimension as list index, and other dimensions are each image's content
        preds: predicted bounding boxes per image to be displayed, must match images count if provided
        bboxes: ground truth (targets) bounding boxes per image to be displayed, must match images count if provided
        color_map: mapping of class-id to color to be applied to drawn bounding boxes on the image
        redraw: existing figure and axes to reuse for drawing the new images and bounding boxes
        block: indicate whether to block execution until all figures have been closed or not
        min_confidence: ignore display of bounding boxes that have a confidence below this value, if available
        class_map: alternative class-id to class-name mapping to employ for display.
            This overrides the default class names retrieved from each bounding box's attributed task.
            Useful for displaying generic bounding boxes obtained from raw input values without a specific task.
        kwargs: other arguments to be passed down to further drawing functions or drawing settings
            (amongst other settings, box_thickness, font_thickness and font_scale can be provided)
    """
    def get_class_name(_bbox):
        # Prefer the explicit class-id -> name mapping; otherwise fall back
        # to the task attached to the bounding box.
        if isinstance(class_map, dict):
            return class_map[_bbox.class_id]
        elif bbox.task is not None:
            # NOTE(review): this reads `bbox` (the enclosing loop variable),
            # not `_bbox` — looks like a latent closure bug; confirm intent.
            return _bbox.task.class_names[_bbox.class_id]
        else:
            raise RuntimeError("could not find class name from either class mapping or bbox task definition")
    image_list = [get_displayable_image(images[batch_idx, ...]) for batch_idx in range(images.shape[0])]
    if color_map is not None and isinstance(color_map, dict):
        # Expand the sparse {class_id: color} dict into a dense 256-entry
        # uint8 lookup table.
        assert len(color_map) <= 256, "too many indices for uint8 map"
        color_map_new = np.zeros((256, 3), dtype=np.uint8)
        for idx, val in color_map.items():
            color_map_new[idx, ...] = val
        color_map = color_map_new.tolist()
    nb_imgs = len(image_list)
    grid_size_x, grid_size_y = nb_imgs, 1  # all images on one row, by default (add gt and preds as extra rows)
    box_thickness = thelper.utils.get_key_def("box_thickness", kwargs, default=2, delete=True)
    font_thickness = thelper.utils.get_key_def("font_thickness", kwargs, default=1, delete=True)
    font_scale = thelper.utils.get_key_def("font_scale", kwargs, default=0.4, delete=True)
    if preds is not None:
        # Draw predictions in place on the first row of images.
        assert len(image_list) == len(preds)
        for preds_list, image in zip(preds, image_list):
            for bbox_idx, bbox in enumerate(preds_list):
                assert isinstance(bbox, thelper.data.BoundingBox), "unrecognized bbox type"
                if bbox.confidence is not None and bbox.confidence < min_confidence:
                    continue
                # Without a color map, spread hues evenly across the boxes.
                color = get_bgr_from_hsl(bbox_idx / len(preds_list) * 360, 1.0, 0.5) \
                    if color_map is None else color_map[bbox.class_id]
                conf = ""
                if thelper.utils.is_scalar(bbox.confidence):
                    conf = f" ({bbox.confidence:.3f})"
                elif isinstance(bbox.confidence, (list, tuple, np.ndarray)):
                    conf = f" ({bbox.confidence[bbox.class_id]:.3f})"
                draw_bbox(image, bbox.top_left, bbox.bottom_right, f"{get_class_name(bbox)} {conf}",
                          color, box_thickness=box_thickness, font_thickness=font_thickness, font_scale=font_scale)
    if bboxes is not None:
        # Ground truth goes on a second, clean row of the same images.
        assert len(image_list) == len(bboxes), "mismatched bboxes list and image list sizes"
        clean_image_list = [get_displayable_image(images[batch_idx, ...]) for batch_idx in range(images.shape[0])]
        for bboxes_list, image in zip(bboxes, clean_image_list):
            for bbox_idx, bbox in enumerate(bboxes_list):
                assert isinstance(bbox, thelper.data.BoundingBox), "unrecognized bbox type"
                color = get_bgr_from_hsl(bbox_idx / len(bboxes_list) * 360, 1.0, 0.5) \
                    if color_map is None else color_map[bbox.class_id]
                draw_bbox(image, bbox.top_left, bbox.bottom_right, f"GT: {get_class_name(bbox)}",
                          color, box_thickness=box_thickness, font_thickness=font_thickness, font_scale=font_scale)
        grid_size_y += 1
        image_list += clean_image_list
    return draw_images(image_list, redraw=redraw, window_name="detections", block=block,
                       grid_size_x=grid_size_x, grid_size_y=grid_size_y, **kwargs)
def html_table_from_dict(data, ordering):
    """
    >>> ordering = ['administrators', 'key', 'leader', 'project']
    >>> data = [ \
    {'key': 'DEMO', 'project': 'Demonstration', 'leader': 'leader@example.com', 'administrators': ['admin1@example.com', 'admin2@example.com']}, \
    {'key': 'FOO', 'project': 'Foo', 'leader': 'foo@example.com', 'administrators': ['foo-admin1@example.com', 'foo-admin2@example.com']}, \
    {'key': 'BAR', 'project': 'Bar', 'leader': 'bar@example.com', 'administrators': ['bar-admin1@example.com', 'bar-admin2@example.com']}]
    >>> html_table_from_dict(data, ordering)
    '<table><tbody>\\n<tr><th>Administrators</th><th>Key</th><th>Leader</th><th>Project</th></tr>\\n<tr><td><ul><li><a href="mailto:admin1@example.com">admin1@example.com</a></li><li><a href="mailto:admin2@example.com">admin2@example.com</a></li></ul></td><td>DEMO</td><td>leader@example.com</td><td>Demonstration</td></tr>\\n<tr><td><ul><li><a href="mailto:foo-admin1@example.com">foo-admin1@example.com</a></li><li><a href="mailto:foo-admin2@example.com">foo-admin2@example.com</a></li></ul></td><td>FOO</td><td>foo@example.com</td><td>Foo</td></tr>\\n<tr><td><ul><li><a href="mailto:bar-admin1@example.com">bar-admin1@example.com</a></li><li><a href="mailto:bar-admin2@example.com">bar-admin2@example.com</a></li></ul></td><td>BAR</td><td>bar@example.com</td><td>Bar</td></tr>\\n</tbody></table>'
    >>> ordering = ['key', 'project', 'leader', 'administrators']
    >>> html_table_from_dict(data, ordering)
    '<table><tbody>\\n<tr><th>Key</th><th>Project</th><th>Leader</th><th>Administrators</th></tr>\\n<tr><td>DEMO</td><td>Demonstration</td><td>leader@example.com</td><td><ul><li><a href="mailto:admin1@example.com">admin1@example.com</a></li><li><a href="mailto:admin2@example.com">admin2@example.com</a></li></ul></td></tr>\\n<tr><td>FOO</td><td>Foo</td><td>foo@example.com</td><td><ul><li><a href="mailto:foo-admin1@example.com">foo-admin1@example.com</a></li><li><a href="mailto:foo-admin2@example.com">foo-admin2@example.com</a></li></ul></td></tr>\\n<tr><td>BAR</td><td>Bar</td><td>bar@example.com</td><td><ul><li><a href="mailto:bar-admin1@example.com">bar-admin1@example.com</a></li><li><a href="mailto:bar-admin2@example.com">bar-admin2@example.com</a></li></ul></td></tr>\\n</tbody></table>'
    """
    # Assemble header + one row per record, then join once instead of
    # accumulating with +=.
    parts = [html_table_header_row(ordering)]
    parts.extend(html_row_with_ordered_headers(row, ordering) for row in data)
    return '<table><tbody>' + ''.join(parts) + '\n</tbody></table>'
import random
def getRandomChests(numChests):
    """Return a list of [x, y] two-element integer lists (not tuples) giving
    unique random treasure chest locations on the board.

    Coordinates are drawn uniformly from the module-level BOARD_WIDTH /
    BOARD_HEIGHT ranges; duplicates are rejected until numChests distinct
    locations have been collected."""
    chests = []
    while len(chests) < numChests:
        newChest = [random.randint(0, BOARD_WIDTH - 1),
                    random.randint(0, BOARD_HEIGHT - 1)]
        # Make sure a chest is not already there:
        if newChest not in chests:
            chests.append(newChest)
    return chests
import random
def random_tolerance(value, tolerance):
    """Return *value* jittered by up to ±``tolerance`` (relative fraction).

    Credit: /u/LightShadow on Reddit.

    Example::

        >>> time.sleep(random_tolerance(1.0, 0.01))
        >>> a = random_tolerance(4.0, 0.25)
        >>> assert 3.0 <= a <= 5.0
    """
    base = float(value)
    if tolerance == 0.0:
        return base
    # Scale the jitter relative to the value itself.
    jitter = random.uniform(-tolerance, tolerance)
    return base + base * jitter
def routes_stations():
    """The counts of stations of routes (top 15, descending)."""
    top_routes = r.zrange(
        "Stats:Route.stations", 0, 14, desc=True, withscores=True
    )
    # Strip the leading underscore from each stored route name and cast the
    # redis score (a float) to an int.
    payload = [(name.removeprefix("_"), int(score)) for name, score in top_routes]
    return jsonify(payload)
import math
def montage(packed_ims, axis):
    """Display as an Image the contents of packed_ims in a square grid along
    an arbitrary axis.

    :param packed_ims: ndarray of stacked images; a plain 2D image is
        returned unchanged
    :param axis: axis along which the individual images are stacked
    :return: single 2D ndarray tiling the images in a ceil(sqrt(N)) grid,
        padded with zero images in the last row
    """
    if packed_ims.ndim == 2:
        return packed_ims

    # bring axis to the front
    packed_ims = np.rollaxis(packed_ims, axis)

    N = len(packed_ims)
    n_tile = math.ceil(math.sqrt(N))
    rows = []
    for i in range(n_tile):
        # BUGFIX: was `i*n_tile > N`, which let a row start at index exactly
        # N and raise IndexError (e.g. N=2 -> n_tile=2, i=1 indexed [2]).
        if i * n_tile >= N: continue
        im = packed_ims[i * n_tile]
        for j in range(1, n_tile):
            ind = i * n_tile + j
            if ind < N:
                im = utils.hstack(im, packed_ims[ind])
            else:
                # Pad the trailing cells of the last row with blank images.
                im = utils.hstack(im, np.zeros_like(packed_ims[0]))
        rows.append(im)

    matrix = rows[0]
    for i in range(1, len(rows)):
        matrix = utils.vstack(matrix, rows[i])
    return matrix
def blocks2image(Blocks, blocks_image):
    """ Function to stitch the blocks back to the original image
        input: Blocks --> the list of blocks (2d numpies)
               blocks_image --> numpy 2d array with numbers corresponding to block number
                                (labels run 1..max; 0 means "no block")
        output: image --> stitched image """
    image = np.zeros(np.shape(blocks_image))
    # BUGFIX: block labels run 1..max inclusive; the previous
    # range(1, int(np.max(...))) stopped one short and silently dropped the
    # highest-numbered block.
    for i in range(1, int(np.max(blocks_image)) + 1):
        # Bounding box of the region labelled `i`.
        ind = np.asarray(np.where(blocks_image == i))
        top = np.min(ind[0, :])
        bottom = np.max(ind[0, :])
        left = np.min(ind[1, :])
        right = np.max(ind[1, :])
        #print('top: {}, bottom: {}, left: {}, right: {}'.format(top, bottom, left, right))
        image[top:bottom + 1, left:right + 1] = Blocks[i - 1]
    return image
def box_mesh(x_extent: float, y_extent: float, z_extent: float) -> Mesh:
    """create a box mesh with the given extents along x, y, z"""
    # wrapper around trimesh interface
    # TODO: my own implementation of this would be nice
    extents = (x_extent, y_extent, z_extent)
    primitive = trimesh.primitives.Box(extents=extents).to_mesh()
    return primitive.vertices, primitive.faces
def create_bucket(storage_client, bucket_name, parsed_args):
    """Creates the test bucket.
    Also sets up lots of different bucket settings to make sure they can be moved.
    Args:
        storage_client: The storage client object used to access GCS
        bucket_name: The name of the bucket to create
        parsed_args: the configargparser parsing of command line options
    Returns:
        The bucket object that has been created in GCS
    """
    bucket = storage.Bucket(client=storage_client, name=bucket_name)
    # All properties set before bucket.create() below are staged locally and
    # sent with the creation request; IAM/ACL changes follow afterwards.
    # Requester pays
    bucket.requester_pays = False
    # CORS
    policies = bucket.cors
    policies.append({'origin': ['/foo']})
    policies[0]['maxAgeSeconds'] = 3600
    bucket.cors = policies
    # KMS Key - When a custom KMS key is set up, uncomment the line below to test it
    #bucket.default_kms_key_name = parsed_args.test_default_kms_key_name
    # Labels
    bucket.labels = {'colour': 'red', 'flavour': 'cherry'}
    # Object Lifecycle Rules
    bucket.lifecycle_rules = [{
        "action": {
            "type": "Delete"
        },
        "condition": {
            "age": 365
        }
    }]
    # Location
    bucket.location = parsed_args.test_bucket_location
    # Storage Class
    bucket.storage_class = parsed_args.test_storage_class
    # File Versioning
    # Setting this to True means we can't delete a non-empty bucket with the CLI in one
    # bucket.delete command
    bucket.versioning_enabled = False
    # Access Logs
    bucket.enable_logging(parsed_args.test_logging_bucket,
                          parsed_args.test_logging_prefix)
    bucket.create()
    # IAM Policies
    policy = bucket.get_iam_policy()
    # Uncomment the line below to view the existing IAM policies
    #print(json.dumps(policy.to_api_repr(), indent=4, sort_keys=True))
    policy['roles/storage.admin'].add('user:' + parsed_args.test_email_for_iam)
    bucket.set_iam_policy(policy)
    # ACLs
    bucket.acl.user(parsed_args.test_email_for_iam).grant_read()
    bucket.acl.save()
    # Default Object ACL
    bucket.default_object_acl.user(parsed_args.test_email_for_iam).grant_read()
    bucket.default_object_acl.save()
    # Push any remaining staged property changes to GCS.
    bucket.update()
    # Bucket Notification
    notification = storage.notification.BucketNotification(
        bucket,
        parsed_args.test_topic_name,
        custom_attributes={'myKey': 'myValue'},
        event_types=['OBJECT_FINALIZE', 'OBJECT_DELETE'],
        payload_format='JSON_API_V1')
    notification.create()
    return bucket
from typing import Dict
from typing import List
from typing import Tuple
def learn_parameters(df_path: str, pas: Dict[str, List[str]]) -> \
        Tuple[Dict[str, List[str]], nx.DiGraph, Dict[str, List[float]]]:
    """
    Gets the parameters.
    :param df_path: CSV file.
    :param pas: Parent-child relationships (structure).
    :return: Tuple; first item is dictionary of domains; second item is a graph; third item is dictionary of probabilities.
    """
    def vals_to_str():
        # Cast every column of the (closed-over) dataframe to string so that
        # domain values compare consistently in the query filters below.
        ddf = df.copy(deep=True)
        for col in ddf.columns:
            ddf[col] = ddf[col].astype(str)
        return ddf
    def get_filters(ch, parents, domains):
        # Build pandas `query` strings selecting each (parent-values, child-value)
        # combination for node `ch`.
        # NOTE(review): this local `pas` shadows the outer `pas` argument of
        # learn_parameters — intentional here, but rename would be clearer.
        pas = parents[ch]
        if len(pas) == 0:
            ch_domain = domains[ch]
            # NOTE(review): root-node filters omit the backtick quoting used
            # below, so column names with spaces would fail here — confirm.
            return [f'{ch}=="{v}"' for v in ch_domain]
        else:
            def is_valid(tups):
                # A combination is valid when each variable appears exactly once.
                n_tups = len(tups)
                u_tups = len(set([name for name, _ in tups]))
                if n_tups == u_tups:
                    return True
                return False
            vals = [[(pa, v) for v in domains[pa]] for pa in pas]
            vals = vals + [[(ch, v) for v in domains[ch]]]
            vals = chain(*vals)
            vals = combinations(vals, len(pas) + 1)
            vals = filter(is_valid, vals)
            vals = map(lambda tups: ' and '.join([f'`{t[0]}`=="{t[1]}"' for t in tups]), vals)
            vals = list(vals)
            return vals
    def get_total(filters, n):
        # Count matching rows per filter and normalize each group of `n`
        # consecutive counts (one group per parent configuration).
        def divide(arr):
            a = np.array(arr)
            n = np.sum(a)
            if n == 0:
                # No observations for this parent configuration: fall back
                # to a uniform distribution.
                p = 1 / len(arr)
                return [p for _ in range(len(arr))]
            r = a / n
            r = list(r)
            return r
        counts = [ddf.query(f).shape[0] for f in filters]
        counts = [counts[i:i + n] for i in range(0, len(counts), n)]
        counts = [divide(arr) for arr in counts]
        counts = list(chain(*counts))
        return counts
    df = expand_data(df_path, pas)
    g = get_graph(pas)
    ddf = vals_to_str()
    nodes = list(g.nodes())
    # Sorted unique values per node define each variable's domain.
    domains = {n: sorted(list(ddf[n].unique())) for n in nodes}
    parents = {ch: list(g.predecessors(ch)) for ch in nodes}
    p = {ch: get_total(get_filters(ch, parents, domains), len(domains[ch])) for ch in nodes}
    return domains, g, p
import io
def extract_urls_n_email(src, all_files, strings):
    """IPA URL and Email Extraction.

    :param src: root path of the extracted IPA source; stripped from file
        paths to produce relative names.
    :param all_files: list of file paths; dict entries are treated as
        pseudo-files with 'name'/'data' keys (e.g. the strings dump).
    :param strings: list of strings extracted from the binary.
    :return: dict with keys urls_list, urlnfile, domains, emailnfile.
        NOTE(review): on any exception this function logs and implicitly
        returns None — callers must tolerate a None result.
    """
    try:
        logger.info('Starting IPA URL and Email Extraction')
        email_n_file = []
        url_n_file = []
        url_list = []
        domains = {}
        # The binary strings dump is scanned the same way as source files.
        all_files.append({'data': strings, 'name': 'IPA Strings Dump'})
        for file in all_files:
            if isinstance(file, dict):
                relative_src_path = file['name']
                dat = '\n'.join(file['data'])
            # Skip CodeResources and contents under Frameworks
            elif 'CodeResources' in file or '/Frameworks/' in file:
                continue
            elif file.endswith(('.nib', '.ttf', '.svg', '.woff2',
                                '.png', '.dylib', '.mobileprovision',
                                'Assets.car')):
                # Binary/resource formats that cannot contain useful text.
                continue
            else:
                dat = ''
                relative_src_path = file.replace(src, '')
                with io.open(file,
                             mode='r',
                             encoding='utf8',
                             errors='ignore') as flip:
                    dat = flip.read()
            # Extract URLs and Emails from the file content
            urls, urls_nf, emails_nf = url_n_email_extract(
                dat, relative_src_path)
            url_list.extend(urls)
            url_n_file.extend(urls_nf)
            email_n_file.extend(emails_nf)
        # Unique URLs
        urls_list = list(set(url_list))
        # Domain Extraction and Malware Check
        logger.info('Performing Malware Check on extracted Domains')
        domains = MalwareDomainCheck().scan(urls_list)
        logger.info('Finished URL and Email Extraction')
        binary_recon = {
            'urls_list': urls_list,
            'urlnfile': url_n_file,
            'domains': domains,
            'emailnfile': email_n_file,
        }
        return binary_recon
    except Exception:
        logger.exception('IPA URL and Email Extraction')
def find_scan_info(filename, position = '__P', scan = '__S', date = '____'):
    """
    Extract the laser position and scan number embedded in a file name.

    The name is expected to look like ``...__P<pos>__S<scan>____<date>...``;
    returns ``(-1, -1)`` when the expected markers are missing.
    """
    try:
        remainder = filename.split(position, 2)[1]
        parts = remainder.split(scan, 2)
        laser_position = parts[0]
        # Anything before the date marker (or the whole tail if absent).
        scan_number = parts[1].split(date, 2)[0]
    except IndexError:
        laser_position = -1
        scan_number = -1
    return laser_position, scan_number
def arglast(arr, convert=True, check=True):
    """Return the index of the last true element of the given array.

    With ``check=True`` (default), returns ``None`` when no element is true.
    """
    if convert:
        arr = np.asarray(arr).astype(bool)
    if np.ndim(arr) != 1:
        raise ValueError("`arglast` not yet supported for ND != 1 arrays!")
    # Reverse the array, locate the first True from that end, then map the
    # reversed index back to the original orientation.
    last = arr.size - 1 - np.argmax(arr[::-1])
    if check and not arr[last]:
        # argmax on an all-False array returns 0, so verify the hit is real.
        return None
    return last
def number_formatter(number, pos=None):
    """Convert a number into a human readable format (e.g. 1500 -> '1.5K').

    Signature matches matplotlib's FuncFormatter callback ``(value, pos)``;
    ``pos`` is accepted but unused.

    The original divided by 100 with a list of empty suffixes, which silently
    dropped the magnitude (1500 -> '15.0'); this restores the conventional
    thousands-based suffixes.
    """
    suffixes = ['', 'K', 'M', 'B', 'T', 'Q']
    magnitude = 0
    # Guard on the suffix list length so astronomically large inputs do not
    # raise IndexError.
    while abs(number) >= 1000 and magnitude < len(suffixes) - 1:
        magnitude += 1
        number /= 1000.0
    return '%.1f%s' % (number, suffixes[magnitude])
from pathlib import Path
import shutil
def simcore_tree(cookies, tmpdir):
    """
    bakes cookie, moves it into a osparc-simcore tree structure with
    all the stub in place

    Returns a tuple ``(simcore_dir, service_dir)`` where ``service_dir`` is
    the baked project relocated under ``services/<slug>`` of the stub tree.
    """
    result = cookies.bake(
        extra_context={"project_slug": PROJECT_SLUG, "github_username": "pcrespov"}
    )
    workdir = Path(result.project).resolve()
    # The template ships a stub of the osparc-simcore repo layout; move it
    # out of the baked project into the temp dir to act as the repo root.
    template_dir = workdir / "_osparc-simcore-stub"
    simcore_dir = tmpdir / "osparc-simcore"
    template_dir.rename(simcore_dir)
    # Replace the stub's placeholder service dir with the baked project.
    service_dir = simcore_dir / "services/{}".format(PROJECT_SLUG)
    shutil.rmtree(service_dir)
    workdir.rename(service_dir)
    return (simcore_dir, service_dir)
def calc_density(temp, pressure, gas_constant):
    """
    Calculate density via gas equation.
    Parameters
    ----------
    temp : array_like
        temperatur in K
    pressure : array_like
        (partial) pressure in Pa
    gas_constant: array_like
        specicif gas constant in m^2/(s^2*K)
    Returns
    -------
    out : ndarray
        density in kg/m^3
    """
    # rho = p / (R_specific * T), the ideal gas law solved for density.
    denominator = temp * gas_constant
    return pressure / denominator
def __parse_ws_data(content, latitude=52.091579, longitude=5.119734):
    """Parse the buienradar xml and rain data.

    :param content: raw XML payload from buienradar.
    :param latitude: reference latitude used to pick the nearest station.
    :param longitude: reference longitude used to pick the nearest station.
    :return: result dict with SUCCESS/MESSAGE/DATA keys (plus DISTANCE).
        NOTE(review): SUCCESS is initialised False and never set True in
        this function — presumably __parse_loc_data sets it; confirm.
    """
    log.info("Parse ws data: latitude: %s, longitude: %s", latitude, longitude)
    result = {SUCCESS: False, MESSAGE: None, DATA: None}
    # convert the xml data into a dictionary:
    try:
        xmldata = xmltodict.parse(content)[__BRROOT]
    except (xmltodict.expat.ExpatError, KeyError):
        result[MESSAGE] = "Unable to parse content as xml."
        log.exception(result[MESSAGE])
        return result
    # select the nearest weather station
    loc_data = __select_nearest_ws(xmldata, latitude, longitude)
    # process current weather data from selected weatherstation
    if not loc_data:
        result[MESSAGE] = 'No location selected.'
        return result
    if not __is_valid(loc_data):
        result[MESSAGE] = 'Location data is invalid.'
        return result
    # add distance to weatherstation
    log.debug("Raw location data: %s", loc_data)
    result[DISTANCE] = __get_ws_distance(loc_data, latitude, longitude)
    # __parse_loc_data returns the (possibly updated) result dict.
    result = __parse_loc_data(loc_data, result)
    # extract weather forecast
    try:
        fc_data = xmldata[__BRWEERGEGEVENS][__BRVERWACHTING]
    except (xmltodict.expat.ExpatError, KeyError):
        result[MESSAGE] = 'Unable to extract forecast data.'
        log.exception(result[MESSAGE])
        return result
    if fc_data:
        # result = __parse_fc_data(fc_data, result)
        log.debug("Raw forecast data: %s", fc_data)
        # pylint: disable=unsupported-assignment-operation
        result[DATA][FORECAST] = __parse_fc_data(fc_data)
    return result
def load_post_data(model, metadata):  # NOQA: C901
    """Fully load metadata and contents into objects (including m2m relations)
    :param model: Model class, any polymorphic sub-class of
        django_docutils.rst_post.models.RSTPost
    :type model: :class:`django:django.db.models.Model`
    :param metadata:
    :type metadata: dict
    :returns: Instance of model

    Upserts the post keyed on ``slug_id``, then rebuilds all of its pages
    from scratch (existing pages are deleted and recreated).
    """
    m2m_metadata, metadata = split_m2m_metadata(metadata)
    # try:
    #     metadata['author'] = User.objects.get(username=metadata['author'])
    # except (User.DoesNotExist, ProgrammingError):
    #     metadata['author'] = None
    metadata['author_name'] = metadata.pop('author', 'Anonymous')
    pages = metadata.pop('pages')
    m, created = model.objects.update_or_create(
        slug_id=metadata['slug_id'], defaults=metadata
    )
    # assure metadata fields stick with auto-generated fields
    for field in ['created', 'modified', 'slug_title']:
        if field in metadata:
            if getattr(m, field) != metadata[field]:
                setattr(m, field, metadata[field])
    # assure slugs update if title different (if not manually overwridden)
    # todo, only run this if title changed
    if not created and m.title and 'slug_title' not in metadata:
        m.slug_title = slugify(m.title)
    if m.is_dirty():
        m.save()
    for page in m.pages.all():  # we can safely delete and re-add content
        page.delete()
    PostPage = m.pages.model
    # re-add one-to-many post -> page associations
    for page_number, page_data in enumerate(pages, 1):
        _, post_page = split_page_data(page_data)
        post_page['page_number'] = page_number
        p = PostPage(**post_page)
        p.save()
        if page_number == 1:
            # first page doubles as the post's root page
            m.root_page = p
            m.save()
        m.pages.add(p)
    # memorize/cache subtitle from first page in Post model
    if m.pages.first().subtitle and m.subtitle != m.pages.first().subtitle:
        m.subtitle = m.pages.first().subtitle
        m.save()
    return m
def address(addr, label=None):
    """Discover the proper class and return instance for a given Oscillate address.
    :param addr: the address as a string-like object
    :param label: a label for the address (defaults to `None`)
    :rtype: :class:`Address`, :class:`SubAddress` or :class:`IntegratedAddress`
    """
    addr = str(addr)
    if _ADDR_REGEX.match(addr):
        # The network byte (first byte of the base58-decoded payload)
        # discriminates plain addresses from subaddresses.
        netbyte = bytearray(unhexlify(base58.decode(addr)))[0]
        if netbyte in Address._valid_netbytes:
            return Address(addr, label=label)
        if netbyte in SubAddress._valid_netbytes:
            return SubAddress(addr, label=label)
        allowed = ", ".join(
            '%02x' % b
            for b in sorted(Address._valid_netbytes + SubAddress._valid_netbytes))
        raise ValueError("Invalid address netbyte {nb:x}. Allowed values are: {allowed}".format(
            nb=netbyte, allowed=allowed))
    if _IADDR_REGEX.match(addr):
        return IntegratedAddress(addr)
    raise ValueError("Address must be either 95 or 106 characters long base58-encoded string, "
                     "is {addr} ({len} chars length)".format(addr=addr, len=len(addr)))
def retrieve_molecule_number(pdb, resname):
    """
    IDENTIFICATION OF MOLECULE NUMBER BASED
    ON THE TER'S

    Molecules are delimited by TER records; returns the 1-based molecule
    number containing the first line whose residue-name field (4th
    whitespace-separated token) equals `resname`.

    :param pdb: path to the PDB file.
    :param resname: residue name to locate (e.g. 'LIG').
    :raises ValueError: if `resname` never appears (the original raised
        UnboundLocalError in that case).
    """
    count = 0
    with open(pdb, 'r') as x:
        for line in x:
            fields = line.split()
            # Guard against short lines: bare 'TER'/'END' records made the
            # original `line.split()[3]` raise IndexError.
            if not fields:
                continue
            if fields[0] == 'TER':
                count += 1
                continue
            if len(fields) > 3 and fields[3] == resname:
                return count + 1
    raise ValueError(f"Residue name '{resname}' not found in {pdb}")
import ctypes
import ctypes.wintypes
import io
def _windows_write_string(s, out, skip_errors=True):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070
    # Maps stdout/stderr file descriptors to the Windows standard-handle IDs
    # expected by GetStdHandle (STD_OUTPUT_HANDLE / STD_ERROR_HANDLE).
    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }
    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False
    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)
    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(
        ('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
    def not_a_console(handle):
        # A handle is a real console only if it is a character device and
        # GetConsoleMode succeeds on it.
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
    if not_a_console(h):
        return False
    def next_nonbmp_pos(s):
        # Index of the first character outside the Basic Multilingual Plane,
        # or len(s) if there is none.
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)
    while s:
        # Write BMP characters in chunks of up to 1024; a non-BMP character
        # is written alone as its two UTF-16 code units (count passed as 2).
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            if skip_errors:
                # NOTE(review): `s` is unchanged here, so a persistently
                # failing WriteConsoleW loops forever — confirm intent.
                continue
            else:
                raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
def in_relative_frame(
    pos_abs: np.ndarray,
    rotation_matrix: np.ndarray,
    translation: Point3D,
) -> np.ndarray:
    """
    Inverse transform of `in_absolute_frame`: translate first, then rotate.
    """
    shifted = pos_abs + translation
    return shifted @ rotation_matrix
def check_url_namespace(app_configs=None, **kwargs):
    """Django system check: NENS_AUTH_URL_NAMESPACE must be a string and,
    when non-empty, must end with a colon."""
    namespace = settings.NENS_AUTH_URL_NAMESPACE
    if not isinstance(namespace, str):
        return [Error("The setting NENS_AUTH_URL_NAMESPACE should be a string")]
    # An empty namespace is allowed; otherwise require a trailing colon.
    if namespace and not namespace.endswith(":"):
        return [
            Error("The setting NENS_AUTH_URL_NAMESPACE should end with a " "colon (:).")
        ]
    return []
def get_exif_data(fn):
    """Returns a dictionary from the exif data of an PIL Image item. Also converts the GPS Tags"""
    exif_data = {}
    image = Image.open(fn)
    raw = image._getexif()
    if raw:
        for tag_id, value in raw.items():
            tag_name = TAGS.get(tag_id, tag_id)
            if tag_name == "GPSInfo":
                # GPS info is a nested mapping with its own tag namespace.
                exif_data[tag_name] = {
                    GPSTAGS.get(gps_id, gps_id): value[gps_id] for gps_id in value
                }
            else:
                exif_data[tag_name] = value
    return exif_data
import os
def get_circuitpython_version(device_path):
    """
    Returns the version number of CircuitPython running on the board connected
    via ``device_path``. This is obtained from the ``boot_out.txt`` file on the
    device, whose content will start with something like this::
        Adafruit CircuitPython 4.1.0 on 2019-08-02;
    :param str device_path: The path to the connected board.
    :return: The version string for CircuitPython running on the connected
        board.
    """
    with open(os.path.join(device_path, "boot_out.txt")) as boot:
        # Only the text before the first ';' matters. Newer CircuitPython
        # versions append extra ';'-separated fields (e.g. a board ID),
        # which made the original two-value unpack raise ValueError.
        circuit_python = boot.read().split(";", 1)[0]
    return circuit_python.split(" ")[-3]
def _setup_modules(module_cls, variable_reparameterizing_predicate,
                   module_reparameterizing_predicate, module_init_kwargs):
    """Return `module_cls` instances for reparameterization and for reference.

    Returns a 5-tuple:
      - the module under test,
      - a reference module built with the original variables,
      - a reference module built with the replacement variables,
      - the original variables,
      - the replacement variables.
    """
    # Module to be tested.
    module_to_reparameterize = _init_module(module_cls, module_init_kwargs)
    # Replacement parameters.
    paths, variables, replacement_variables = get_params_and_replacements(
        module_to_reparameterize,
        variable_reparameterizing_predicate,
        module_reparameterizing_predicate,
    )
    # Reference modules: same paths, differing only in which variable set
    # is plugged in ("before" vs "after" the reparameterization).
    before_reference_module = _init_reference_module(module_cls,
                                                     module_init_kwargs, paths,
                                                     variables)
    after_reference_module = _init_reference_module(module_cls,
                                                    module_init_kwargs, paths,
                                                    replacement_variables)
    return (
        module_to_reparameterize,
        before_reference_module,
        after_reference_module,
        variables,
        replacement_variables,
    )
def external_compatible(request, id):
    """ Increment view counter for a compatible view """
    # Fire-and-forget: the counter update runs asynchronously via Celery.
    increment_hit_counter_task.delay(id, 'compatible_count')
    return json_success_response()
def lowpass_xr(da,cutoff,**kw):
    """
    Like lowpass(), but ds is a data array with a time coordinate,
    and cutoff is a timedelta64.

    Returns a deep copy of `da` with the filtered values and a `comment`
    attribute recording the cutoff; extra keyword args pass through to
    lowpass().
    """
    data=da.values
    # Convert time coordinate to seconds elapsed since the first sample.
    time_secs=(da.time.values-da.time.values[0])/np.timedelta64(1,'s')
    cutoff_secs=cutoff/np.timedelta64(1,'s')
    # Filter along whichever axis holds 'time'.
    axis=da.get_axis_num('time')
    data_lp=lowpass(data,time_secs,cutoff_secs,axis=axis,**kw)
    da_lp=da.copy(deep=True)
    da_lp.values[:]=data_lp
    da_lp.attrs['comment']="lowpass at %g seconds"%(cutoff_secs)
    return da_lp
def _calc_zonal_correlation(dat_tau, dat_pr, dat_tas, dat_lats, fig_config):
    """
    Calculate zonal partial correlations for sliding windows.
    Argument:
    --------
        dat_tau - data of global tau
        dat_pr - precipitation
        dat_tas - air temperature
        dat_lats - latitude of the given model
        fig_config - figure/diagnostic configurations
    Return:
    ------
        corr_dat zonal correlations
    """
    # get the interval of latitude and create array for partial correlation
    lat_int = abs(dat_lats[1] - dat_lats[0])
    corr_dat = np.ones((np.shape(dat_tau)[0], 2)) * np.nan
    # get the size of the sliding window based on the bandsize in degrees
    window_size = round(fig_config['bandsize'] / (lat_int * 2.))
    dat_tau, dat_pr, dat_tas = _apply_common_mask(dat_tau, dat_pr, dat_tas)
    # minimum 1/8 of the given window has valid data points
    min_points = np.shape(dat_tau)[1] * fig_config['min_points_frac']
    for lat_index in range(len(corr_dat)):
        # int(...) instead of np.int(...): np.int was deprecated in NumPy
        # 1.20 and removed in 1.24, so the original calls crash on modern
        # NumPy. Behavior is identical (np.int was an alias of builtin int).
        istart = int(max(0, lat_index - window_size))
        iend = int(min(np.size(dat_lats), lat_index + window_size + 1))
        dat_tau_zone = dat_tau[istart:iend, :]
        dat_pr_zone = dat_pr[istart:iend, :]
        dat_tas_zone = dat_tas[istart:iend, :]
        dat_x = np.ma.masked_invalid(dat_tau_zone).compressed().flatten()
        dat_y = np.ma.masked_invalid(dat_pr_zone).compressed().flatten()
        dat_z = np.ma.masked_invalid(dat_tas_zone).compressed().flatten()
        num_valid_points = sum(~np.isnan(dat_x + dat_y + dat_z))
        if num_valid_points > min_points:
            # column 1: corr(tau, pr | tas); column 0: corr(tau, tas | pr)
            corr_dat[lat_index, 1] = partial_corr(
                np.vstack((dat_x, dat_y, dat_z)).T, fig_config)
            corr_dat[lat_index, 0] = partial_corr(
                np.vstack((dat_x, dat_z, dat_y)).T, fig_config)
    return corr_dat
def is_unary(string):
    """
    Return true if the string is a defined unary mathematical
    operator function (i.e. it appears in mathwords.UNARY_FUNCTIONS).
    """
    return string in mathwords.UNARY_FUNCTIONS
def GetBucketAndRemotePath(revision, builder_type=PERF_BUILDER,
                           target_arch='ia32', target_platform='chromium',
                           deps_patch_sha=None):
  """Returns the location where a build archive is expected to be.
  Args:
    revision: Revision string, e.g. a git commit hash or SVN revision.
    builder_type: Type of build archive.
    target_arch: Architecture, e.g. "ia32".
    target_platform: Platform name, e.g. "chromium" or "android".
    deps_patch_sha: SHA1 hash which identifies a particular combination of
        custom revisions for dependency repositories.
  Returns:
    A pair of strings (bucket, path), where the archive is expected to be.
  """
  # The BuildArchive subclass encapsulates both the bucket naming scheme and
  # the revision-based file layout.
  archive = BuildArchive.Create(
      builder_type, target_arch=target_arch, target_platform=target_platform)
  return archive.BucketName(), archive.FilePath(
      revision, deps_patch_sha=deps_patch_sha)
def cutmix_padding(h, w):
  """Returns image mask for CutMix.
  Taken from (https://github.com/google/edward2/blob/master/experimental
  /marginalization_mixup/data_utils.py#L367)
  Args:
    h: image height.
    w: image width.

  Returns:
    A float mask of shape (h, w, 1) that is 1 inside a random rectangle
    and 0 outside it.
  """
  # Random centre of the patch.
  r_x = tf.random.uniform([], 0, w, tf.int32)
  r_y = tf.random.uniform([], 0, h, tf.int32)
  # Beta dist in paper, but they used Beta(1,1) which is just uniform.
  image1_proportion = tf.random.uniform([])
  # Patch side length so that its area is (1 - image1_proportion) of the image.
  patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
  r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
  r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
  # Clip the patch corners to the image bounds.
  bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
  bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
  bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
  bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
  # Create the binary mask.
  pad_left = bbx1
  pad_top = bby1
  pad_right = tf.maximum(w - bbx2, 0)
  pad_bottom = tf.maximum(h - bby2, 0)
  r_h = bby2 - bby1
  r_w = bbx2 - bbx1
  mask = tf.pad(
      tf.ones((r_h, r_w)),
      paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
      mode='CONSTANT',
      constant_values=0)
  mask.set_shape((h, w))
  return mask[..., None]  # Add channel dim.
import math
def compute_star_verts(n_points, out_radius, in_radius):
    """Vertices for a star. `n_points` controls the number of points;
    `out_radius` controls distance from points to centre; `in_radius` controls
    radius from "depressions" (the things between points) to centre."""
    assert n_points >= 3
    tip = pm.vec2d.Vec2d(0, out_radius)
    dip = pm.vec2d.Vec2d(0, in_radius)
    rotated = []
    for k in range(n_points):
        # Alternate outer tips and inner depressions around the circle.
        rotated.append(tip.rotated(k * 2 * math.pi / n_points))
        rotated.append(dip.rotated((2 * k + 1) * math.pi / n_points))
    return [(v.x, v.y) for v in rotated]
def create_ordering_dict(iterable):
    """Example: converts ['None', 'ResFiles'] to {'None': 0, 'ResFiles': 1}

    Maps each element to its position; for duplicate elements the last
    occurrence wins (same as the original implementation).
    """
    # Dict comprehension replaces the Python-2-only dict.iteritems() round
    # trip of the original, which raises AttributeError on Python 3.
    return {element: index for index, element in enumerate(iterable)}
def sparse2tuple(mx):
    """Convert sparse matrix to tuple representation.
    ref: https://github.com/tkipf/gcn/blob/master/gcn/utils.py

    Returns (coords, values, shape) where coords is an (nnz, 2) array of
    (row, col) indices.
    """
    # COO format exposes parallel row/col/data arrays directly.
    coo = mx if sp.isspmatrix_coo(mx) else mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).transpose()
    return coords, coo.data, coo.shape
def correct_predicted(y_true, y_pred):
    """ Compare the ground truth and predict labels,
    Parameters
    ----------
    y_true: an array like for the true labels
    y_pred: an array like for the predicted labels
    Returns
    -------
    correct_predicted_idx: a list of index of correct predicted
    correct_score: a rate of accuracy rate
    Raises
    ------
    ValueError: if the two label sequences differ in length.
    H. J. @ 2018-12-18
    """
    if len(y_true) != len(y_pred):
        # The original `raise "Dimension unmatches"` raised a bare string,
        # which is itself a TypeError on Python 3.
        raise ValueError("Dimension unmatches: len(y_true) != len(y_pred)")
    correct_predicted_idx = [
        idx for idx, (true, pred) in enumerate(zip(y_true, y_pred))
        if pred == true
    ]
    correct_score = accuracy_score(y_true, y_pred)
    return correct_predicted_idx, correct_score
def coord_image_to_trimesh(coord_img, validity_mask=None, batch_shape=None, image_dims=None, dev_str=None):
    """Create trimesh, with vertices and triangle indices, from co-ordinate image.
    Parameters
    ----------
    coord_img
        Image of co-ordinates *[batch_shape,h,w,3]*
    validity_mask
        Boolean mask of where the coord image contains valid values
        *[batch_shape,h,w,1]* (Default value = None)
    batch_shape
        Shape of batch. Inferred from inputs if None. (Default value = None)
    image_dims
        Image dimensions. Inferred from inputs in None. (Default value = None)
    dev_str
        device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
        Same as x if None. (Default value = None)
    Returns
    -------
    ret
        Vertices *[batch_shape,(hxw),3]* amd Trimesh indices *[batch_shape,n,3]*
    """
    if dev_str is None:
        dev_str = _ivy.dev_str(coord_img)
    if batch_shape is None:
        batch_shape = _ivy.shape(coord_img)[:-3]
    if image_dims is None:
        image_dims = _ivy.shape(coord_img)[-3:-1]
    # shapes as lists
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)
    # BS x (HxW) x 3
    vertices = _ivy.reshape(coord_img, batch_shape + [image_dims[0] * image_dims[1], 3])
    if validity_mask is not None:
        # Each pixel-square produces two triangles (t0, t1); a triangle is
        # kept only if all three of its corner pixels are valid.
        # BS x H-1 x W-1 x 1
        t00_validity = validity_mask[..., 0:image_dims[0] - 1, 0:image_dims[1] - 1, :]
        t01_validity = validity_mask[..., 0:image_dims[0] - 1, 1:image_dims[1], :]
        t02_validity = validity_mask[..., 1:image_dims[0], 0:image_dims[1] - 1, :]
        t10_validity = validity_mask[..., 1:image_dims[0], 1:image_dims[1], :]
        t11_validity = t01_validity
        t12_validity = t02_validity
        # BS x H-1 x W-1 x 1
        t0_validity = _ivy.logical_and(t00_validity, _ivy.logical_and(t01_validity, t02_validity))
        t1_validity = _ivy.logical_and(t10_validity, _ivy.logical_and(t11_validity, t12_validity))
        # BS x (H-1xW-1)
        t0_validity_flat = _ivy.reshape(t0_validity, batch_shape + [-1])
        t1_validity_flat = _ivy.reshape(t1_validity, batch_shape + [-1])
        # BS x 2x(H-1xW-1)
        trimesh_index_validity = _ivy.concatenate((t0_validity_flat, t1_validity_flat), -1)
        # BS x N
        trimesh_valid_indices = _ivy.indices_where(trimesh_index_validity)
        # BS x 2x(H-1xW-1) x 3
        all_trimesh_indices = create_trimesh_indices_for_image(batch_shape, image_dims, dev_str)
        # BS x N x 3
        trimesh_indices = _ivy.gather_nd(all_trimesh_indices, trimesh_valid_indices)
    else:
        # No mask: keep the full regular triangulation of the image grid.
        # BS x N=2x(H-1xW-1) x 3
        trimesh_indices = create_trimesh_indices_for_image(batch_shape, image_dims)
    # BS x (HxW) x 3, BS x N x 3
    return vertices, trimesh_indices
def expand_db_html(html, for_editor=False):
    """
    Expand database-representation HTML into proper HTML usable in either
    templates or the rich text editor

    :param html: HTML string in database representation.
    :param for_editor: when True, handlers expand attributes for the rich
        text editor rather than for the frontend.
    """
    def replace_a_tag(m):
        # m.group(1) holds the tag's attribute string.
        attrs = extract_attrs(m.group(1))
        if 'linktype' not in attrs:
            # return unchanged
            return m.group(0)
        handler = get_link_handler(attrs['linktype'])
        return handler.expand_db_attributes(attrs, for_editor)
    def replace_embed_tag(m):
        attrs = extract_attrs(m.group(1))
        handler = get_embed_handler(attrs['embedtype'])
        return handler.expand_db_attributes(attrs, for_editor)
    html = FIND_A_TAG.sub(replace_a_tag, html)
    html = FIND_EMBED_TAG.sub(replace_embed_tag, html)
    return html
def hungarian(matrx):
    """Runs the Hungarian Algorithm on a given matrix and returns the optimal matching with potentials. Produces intermediate images while executing."""
    frames = []
    # Step 1: Prep matrix, get size
    matrx = np.array(matrx)
    size = matrx.shape[0]
    # Step 2: Generate trivial potentials
    # Row potentials start at each row's maximum so that every edge weight
    # satisfies weight <= rpotential[x] + cpotential[y].
    rpotentials = []
    cpotentials = [0 for i in range(size)]
    for i in range(len(matrx)):
        row = matrx[i]
        rpotentials.append(max(row))
    # Step 3: Initialize alternating tree
    matching = []
    S = {0}
    T = set()
    tree_root = Node(0)
    x_nodes = {0: tree_root}
    frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), INITIAL_STAGE])
    # Create helper functions
    def neighbours(wset):
        """Finds all firms in equality graph with workers in wset."""
        result = []
        for x in wset:
            # get row of firms for worker x
            nbs = matrx[x, :]
            for y in range(len(nbs)):
                # check for equality
                if nbs[y] == rpotentials[x] + cpotentials[y]:
                    result.append([x, y])
        return result
    def update_potentials():
        """Find the smallest difference between treed workers and untreed firms
        and use it to update potentials."""
        # when using functions in functions, if modifying variables, call nonlocal
        nonlocal rpotentials, cpotentials
        big = np.inf
        args = None
        # iterate over relevant pairs
        for dx in S:
            for dy in set(range(size)) - T:
                # find the difference and check if its smaller than any we found before
                weight = matrx[dx, dy]
                alpha = rpotentials[dx] + cpotentials[dy] - weight
                if alpha < big:
                    big = alpha
                    args = [dx, dy]
        # apply difference to potentials as needed
        for dx in S:
            rpotentials[dx] -= big
        for dy in T:
            cpotentials[dy] += big
        return big, S, T, args
    # Step 4: Loop while our matching is too small
    while len(matching) != size:
        # Step A: Compute neighbours in equality graph
        NS = neighbours(S)
        frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), NEIGHBOUR_STAGE, NS])
        if set([b[1] for b in NS]) == T:
            # Step B: If all firms are in the tree, update potentials to get a new one
            alpha, ds, dt, args = update_potentials()
            NS = neighbours(S)
            frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), UPDATE_STAGE, alpha, ds.copy(), dt.copy(), args])
            frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), NEIGHBOUR_STAGE, NS])
        # get the untreed firm
        pair = next(n for n in NS if n[1] not in T)
        if pair[1] not in [m[1] for m in matching]:
            # Step D: Firm is not matched so add it to matching
            thecopy = matching.copy()
            frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), thecopy, MATCHING_STAGE, pair, thecopy])
            matching.append(pair)
            # Step E: Swap the alternating path in our alternating tree attached to the worker we matched
            source = x_nodes[pair[0]]
            matched = 1
            while source.parent != None:
                above = source.parent
                if matched:
                    # if previously matched, this should be removed from matching
                    matching.remove([source.val, above.val])
                else:
                    # if previous was a remove, this is a match
                    matching.append([above.val, source.val])
                # alternate between removing and adding edges along the path
                matched = 1 - matched
                source = above
            frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), FLIPPING_STAGE, pair, thecopy])
            # Step F: Destroy the tree, go to Step 4 to check completion, and possibly go to Step A
            free = list(set(range(size)) - set([m[0] for m in matching]))
            if len(free):
                # restart the alternating tree from any still-unmatched worker
                tree_root = Node(free[0])
                x_nodes = {free[0]: tree_root}
                S = {free[0]}
                T = set()
                frames.append([rpotentials.copy(), cpotentials.copy(),get_paths(x_nodes), matching.copy(), RESET_STAGE])
            else:
                x_nodes = {}
                S = set()
                T = set()
        else:
            # Step C: Firm is matched so add it to the tree and go back to Step A
            matching_x = next(m[0] for m in matching if m[1] == pair[1])
            S.add(matching_x)
            T.add(pair[1])
            source = x_nodes[pair[0]]
            y_node = Node(pair[1], source)
            x_node = Node(matching_x, y_node)
            x_nodes[matching_x] = x_node
            frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), EXPANSION_STAGE])
    revenues = [matrx[m[0], m[1]] for m in matching]
    class Result:
        """A simple response object."""
        def __init__(self, match, revenues, row_weights, col_weights, revenue_sum, result, matrix):
            self.match = match
            self.revenues = revenues
            self.row_weights = row_weights
            self.col_weights = col_weights
            self.revenue_sum = revenue_sum
            self.frames = process_frames(result, matrix)
        def __str__(self):
            # Render the matching as a matrix with revenues at matched cells.
            size = len(self.match)
            maxlen = max(len(str(max(self.revenues))), len(str(min(self.revenues))))
            baselist = [[" "*maxlen for i in range(size)] for j in range(size)]
            for i in range(size):
                entry = self.match[i]
                baselist[entry[0]][entry[1]] = str(self.revenues[i]).rjust(maxlen)
            formatted_list = '\n'.join([str(row) for row in baselist])
            return f"Matching:\n{formatted_list}\n\nRow Potentials: {self.row_weights}\nColumn Potentials: {self.col_weights}"
    frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), EXIT_STAGE])
    return Result(matching, revenues, rpotentials, cpotentials, sum(revenues), frames, matrx)
def open_spreadsheet_from_args(google_client: gspread.Client, args):
    """
    Attempt to open the Google Sheets spreadsheet specified by the given
    command line arguments.

    The identifiers are tried in priority order: ID, then URL, then name.
    Raises ValueError when none of them was supplied.
    """
    # (argparse attribute, log template, gspread opener) in priority order.
    lookups = (
        ("spreadsheet_id", "Opening spreadsheet by ID '{}'", google_client.open_by_key),
        ("spreadsheet_url", "Opening spreadsheet by URL '{}'", google_client.open_by_url),
        ("spreadsheet_name", "Opening spreadsheet by name '{}'", google_client.open),
    )
    for attr, template, opener in lookups:
        identifier = getattr(args, attr)
        if identifier:
            logger.info(template.format(identifier))
            return opener(identifier)
    raise ValueError("Invalid command line arguments - no spreadsheet identifier was provided")
def perturb_BB(image_shape, bb, max_pertub_pixel,
               rng=None, max_aspect_ratio_diff=0.3,
               max_try=100):
    """
    Randomly perturb a bounding box.

    :param image_shape: [h, w]
    :param bb: a `Rect` instance
    :param max_pertub_pixel: maximum perturbation in pixels (inclusive),
        applied independently to each of the four box coordinates
    :param rng: a `np.random.RandomState`; a fresh one is created when None
    :param max_aspect_ratio_diff: result can't have an aspect ratio too
        different (relative difference) from the original
    :param max_try: if no valid bounding box is found within this many
        attempts, return the original
    :returns: new bounding box (or `bb` unchanged if all tries failed)
    """
    orig_ratio = bb.h * 1.0 / bb.w
    if rng is None:
        rng = np.random.RandomState()
    for _ in range(max_try):
        # +1 because np.randint's upper bound is exclusive: without it the
        # perturbation could never reach +max_pertub_pixel, and
        # max_pertub_pixel == 0 raised ValueError (low >= high).
        p = rng.randint(-max_pertub_pixel, max_pertub_pixel + 1, [4])
        newbb = bb.copy()
        newbb.x += p[0]
        newbb.y += p[1]
        # Perturb the far corner, then rederive width/height from both corners.
        newx1 = bb.x1 + p[2]
        newy1 = bb.y1 + p[3]
        newbb.w = newx1 - newbb.x
        newbb.h = newy1 - newbb.y
        if not newbb.validate(image_shape):
            continue
        new_ratio = newbb.h * 1.0 / newbb.w
        diff = abs(new_ratio - orig_ratio)
        if diff / orig_ratio > max_aspect_ratio_diff:
            continue
        return newbb
    return bb
def simpleCheck(modelConfig, days: int = 100, visuals: bool = True, debug: bool = False, modelName: str = "default", outputDir: str = "outputs", returnTimeseries: bool = False):
    """
    Run one simulation with the given config and optionally save plots of the
    tracked infection counts over time.

    :param modelConfig: configuration forwarded to ``createModel``
    :param days: number of simulated days; each day advances 24 steps
        (presumably hourly -- TODO confirm against updateSteps)
    :param visuals: when True, save per-compartment and total plots as PNGs
    :param debug: forwarded to ``createModel``; also prints model state each day
    :param modelName: base name for the output image files
    :param outputDir: directory (resolved via ``flr.fullPath``) for the images
    :param returnTimeseries: when True, return the stored time series instead
        of ``model.outputs()``
    """
    # Dev switches for caching a built model with dill; both hard-coded off,
    # so the model is always built fresh from modelConfig below.
    loadDill, saveDill = False, False
    pickleName = flr.fullPath("coronaModel.pkl", "picklefile")
    if not loadDill:
        model = createModel(modelConfig, debug=debug) # this is the usual path
        if saveDill:
            flr.saveUsingDill(pickleName, model)
            # save an instance for faster loading
            # NOTE(review): this early `return 0` skips the whole simulation
            # when saveDill is enabled -- intentional save-only mode, presumably.
            return 0
    else:
        model = flr.loadUsingDill(pickleName)
        #print("loaded pickled object successfully")
    # start initialization and configuration
    model.initializeAndConfigureObjects()
    # Compartment counts to record at every step.
    model.initializeStoringParameter(
        ["susceptible","exposed", "infected Asymptomatic",
        "infected Asymptomatic Fixed" ,"infected Symptomatic Mild",
        "infected Symptomatic Severe", "recovered", "quarantined"])
    model.printRelevantInfo()
    # Main simulation loop: one iteration per day, 24 steps per day.
    for _ in range(days):
        model.updateSteps(24)
        if debug:
            model.printRelevantInfo()
    # Post-run sanity checks and per-room logging.
    model.final_check()
    model.printRoomLog()
    #tup = model.findDoubleTime()
    #for description, tupVal in zip(("doublingTime", "doublingInterval", "doublingValue"), tup):
    #    print(description, tupVal)
    if visuals:
        fileformat = ".png"
        # First plot: per-compartment curves; second (suffix "_total"):
        # the aggregated/total variant of the same data.
        model.visualOverTime(False, True, flr.fullPath(modelName+fileformat, outputDir))
        modelName+="_total"
        model.visualOverTime(True, True, flr.fullPath(modelName+fileformat, outputDir))
        #model.visualizeBuildings()
    # return (newdata, otherData, data, totalExposed)
    if returnTimeseries:
        return model.returnStoredInfo()
    return model.outputs()
def neural_network(inputs, weights):
    """
    Run an input vector through a single-layer neural network with a tanh
    activation and return the output.

    Arg:
        inputs - 2 x 1 NumPy array
        weights - 2 x 1 NumPy array
    Returns (in this order):
        out - a 1 x 1 NumPy array, representing the output of the neural network
    """
    # Pre-activation: w^T x, then squash through tanh.
    pre_activation = weights.T @ inputs
    return np.tanh(pre_activation)
def complex_randn(shape):
"""
Returns a complex-valued numpy array of random values with shape `shape`
Args:
shape: (tuple) tuple of ints that will be the shape of the resultant complex numpy array
Returns: (:obj:`np.ndarray`): a complex-valued numpy array of random values with shape `shape`
"""
return np.random.randn(*shape) + 1j * np.random.randn(*shape) | 6379fb2fb481392dce7fb4eab0e85ea85651b290 | 31,594 |
import glob
import os.path
import pickle
from sys import path
def load_pairs(inputdir, regex, npairs=100):
    """Load a previously generated set of pairs.

    :param inputdir: directory containing the pickled pair files
    :param regex: glob pattern (relative to inputdir) matching the pair files
    :param npairs: number of point pairs to randomly sample from each file;
        when a file has fewer pairs, all of them are kept (in order)
    :returns: (pairs, n_slices, n_tiles) where pairs is a list of
        (p, src, dst, model, w) tuples and the counts are 1 + the largest
        slice/tile index seen
    """
    # BUG FIX: was `path.join` with `from sys import path` in scope --
    # sys.path is a list and has no .join, so this raised AttributeError.
    pairfiles = glob.glob(os.path.join(inputdir, regex))
    pairs = []
    slcnr = 0
    tilenr = 0
    for pairfile in pairfiles:
        # Context manager closes the handle (the bare open() leaked it).
        with open(pairfile, 'rb') as f:
            p, src, dst, model, w = pickle.load(f)  # , encoding='latin1'
        # A model that failed to fit contributes zero weight.
        if np.isnan(model.params).any():
            w = 0
        population = range(0, src.shape[0])
        try:
            pairnrs = sample(population, npairs)
        except ValueError:
            # Fewer pairs than requested: keep every one of them.
            pairnrs = [i for i in population]
            print("TOO LITTLE DATA for pair in %s!" % pairfile)
        pairs.append((p, src[pairnrs, :], dst[pairnrs, :], model, w))
        slcnr = max(p[0][0], slcnr)
        tilenr = max(p[0][1], tilenr)
    return pairs, slcnr+1, tilenr+1
def sin(x: REAL) -> float:
    """Sine, via the Maclaurin series, summed until successive partial sums
    differ by less than _TAYLOR_DIFFERENCE."""
    # Reduce to [0, 2*pi) so the series converges quickly.
    x %= 2 * pi
    total = 0
    term_index = 0
    while True:
        previous = total
        total += (-1) ** term_index * x ** (2 * term_index + 1) / fac(2 * term_index + 1)
        if abs(previous - total) < _TAYLOR_DIFFERENCE:
            return total
        term_index += 1
import tokenize
from operator import getitem
def _getitem_row_chan(avg, idx, dtype):
    """Extract (row, chan, corr) arrays from a dask array of tuples.

    :param avg: dask array whose blocks hold tuples
    :param idx: tuple index to extract from each block
    :param dtype: dtype of the resulting array
    :returns: dask array of the extracted element with (row, chan, corr) dims
    """
    # NOTE(review): `tokenize` here must be the callable dask.base.tokenize;
    # the stdlib `tokenize` module imported above is not callable -- confirm
    # the intended import at file level.
    name = ("row-chan-average-getitem-%d-" % idx) + tokenize(avg, idx)
    dim = ("row", "chan", "corr")
    # Lazily apply operator.getitem(block, idx) to every block.
    layers = db.blockwise(getitem, name, dim,
                          avg.name, dim,
                          idx, None,
                          numblocks={avg.name: avg.numblocks})
    graph = HighLevelGraph.from_collections(name, layers, (avg,))
    # Builtin `object`, not the `np.object` alias removed in NumPy 1.24;
    # the meta is an empty object-dtype array of matching rank.
    return da.Array(graph, name, avg.chunks,
                    meta=np.empty((0,)*len(dim), dtype=object),
                    dtype=dtype)
import gzip
def make_gzip(tar_file, destination):
    """
    Compress ``tar_file`` with gzip, writing the result to
    ``destination + '.tar.gz'``.

    :param tar_file: path of the (tar) file to compress
    :param destination: output path without the '.tar.gz' suffix
    :returns: True on success
    """
    # Context managers guarantee both handles are closed even when
    # compression fails part-way (the original leaked on any exception).
    with open(tar_file, 'rb') as src, \
            gzip.open(destination + '.tar.gz', 'wb') as gz:
        # Copy in fixed-size binary chunks instead of iterating "lines",
        # which is meaningless (and slow) for arbitrary tar bytes.
        for chunk in iter(lambda: src.read(1 << 16), b""):
            gz.write(chunk)
    return True
def symmetrize_confusion_matrix(CM, take='all'):
    """
    Sum over the population axis (if present), symmetrize, then return the
    requested portion of the symmetric matrix.

    :param CM: numpy.ndarray confusion matrix in standard format
    :param take: which entries to return -- 'all' (upper triangle including
        the diagonal), 'diagonal', or 'off_diagonal' (strict upper triangle)
    """
    # Collapse any trailing population axis down to a single 2-D matrix.
    if CM.ndim > 2:
        CM = CM.sum(2)
    assert len(CM.shape) == 2, 'This function is meant for single subject confusion matrices'
    sym = CM + CM.T
    size = CM.shape[0]
    if take == 'all':
        return sym[np.triu_indices(size)]
    if take == 'diagonal':
        return sym.diagonal()
    if take == 'off_diagonal':
        return sym[np.triu_indices(size, 1)]
    raise ValueError("Take %s not recognized. Allowed takes are all, diagonal and off_diagonal" % take)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.