| content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
#print ('lll: ', blobs['rois'])
return blobs, im_scale_factors
|
d4adb2e049a86fe1a42aab6dea52b55aabeeb0d2
| 3,648,500
|
def string_limiter(text, limit):
"""
Truncates the string at the first space at or after the given character limit.
Arguments:
text -- The string to reduce the length of
limit -- The number of characters that are allowed in the string
"""
for i in range(len(text)):
if i >= limit and text[i] == " ":
break
return text[:i]
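A quick usage sketch (not part of the original snippet): the function cuts at the first space at or after the character limit, so a limit of 10 on the 19-character string below keeps the first three words.
print(string_limiter("the quick brown fox", 10))  # -> 'the quick brown'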
|
1ae70d2115be72ec628f38b2c623064607f534ef
| 3,648,501
|
import numpy as np
def in_ellipse(xy_list,width,height,angle=0,xy=[0,0]):
"""
Find data points inside an ellipse and return index list
Parameters:
xy_list: Points to be tested.
width: Width of the ellipse
height: Height of the ellipse
angle: anti-clockwise rotation angle in degrees
xy: the origin of the ellipse
"""
if isinstance(xy_list,list):
xy_list = np.array(xy_list)
if not isinstance(xy_list,np.ndarray):
raise Exception(f"Unrecognized data type: {type(xy_list)}, \
should be list or np.ndarray")
new_xy_list = xy_list.copy()
new_xy_list = new_xy_list - xy
#------------ define coordinate conversion matrix----------
theta = angle/180*np.pi # degree to radians
con_mat = np.zeros((2,2))
con_mat[:,0] = [np.cos(theta),np.sin(theta)]
con_mat[:,1] = [np.sin(theta),-np.cos(theta)]
tmp = np.matmul(con_mat,new_xy_list.T)
con_xy_list = tmp.T
#------------ check one by one ----------------------------
idxs = []
for i,[x,y] in enumerate(con_xy_list):
if ((x/(width/2))**2+(y/(height/2))**2) < 1:
idxs.append(i)
return idxs
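A small usage sketch (the numpy import added above is required by the function): with an axis-aligned 6x4 ellipse centred at the origin, the first and last points fall inside, while points sitting exactly on the boundary are excluded by the strict inequality.
pts = [[0, 0], [3, 0], [0, 2], [2.9, 0]]
print(in_ellipse(pts, width=6, height=4))  # -> [0, 3]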
|
6540520caa6eef12871847f80d3ed42279b0c1a0
| 3,648,502
|
import logging
import numpy as np
import tensorflow as tf  # assumed: this snippet targets the TF 1.x graph/session API
def get_real_images(dataset,
num_examples,
split=None,
failure_on_insufficient_examples=True):
"""Get num_examples images from the given dataset/split.
Args:
dataset: `ImageDataset` object.
num_examples: Number of images to read.
split: Split of the dataset to use. If None will use the default split for
eval defined by the dataset.
failure_on_insufficient_examples: If True, raise an exception if the
dataset/split does not contain enough images. Otherwise, log an error and
return fewer images.
Returns:
4-D NumPy array with images with values in [0, 256].
Raises:
ValueError: If the dataset/split does not contain the requested number of
images and `failure_on_insufficient_examples` is True.
"""
logging.info("Start loading real data.")
with tf.Graph().as_default():
ds = dataset.eval_input_fn(split=split)
# Get real images from the dataset. In the case of a 1-channel
# dataset (like MNIST) convert it to 3 channels.
next_batch = ds.make_one_shot_iterator().get_next()[0]
shape = [num_examples] + next_batch.shape.as_list()
is_single_channel = shape[-1] == 1
if is_single_channel:
shape[-1] = 3
real_images = np.empty(shape, dtype=np.float32)
with tf.Session() as sess:
for i in range(num_examples):
try:
b = sess.run(next_batch)
b *= 255.0
if is_single_channel:
b = np.tile(b, [1, 1, 3])
real_images[i] = b
except tf.errors.OutOfRangeError:
logging.error("Reached the end of dataset. Read: %d samples.", i)
real_images = real_images[:i]
break
if real_images.shape[0] != num_examples:
if failure_on_insufficient_examples:
raise DatasetOutOfRangeError("Not enough examples in the dataset %s: %d / %d" %
(dataset, real_images.shape[0], num_examples))
else:
logging.error("Not enough examples in the dataset %s: %d / %d", dataset,
real_images.shape[0], num_examples)
logging.info("Done loading real data.")
return real_images
|
0f9be93076b8d94b3285a1f5badb8952788e2a82
| 3,648,503
|
from typing import Callable
from typing import Any
import websockets
async def call(fn: Callable, *args, **kwargs) -> Any:
"""
Submit function `fn` for remote execution with arguments `args` and `kwargs`
"""
async with websockets.connect(WS_SERVER_URI) as websocket:
task = serialize((fn, args, kwargs))
await websocket.send(task)
message = await websocket.recv()
results = deserialize(message)
if isinstance(results, TaskExecutionError):
raise results
return results
|
073090186e4a325eb32b44fb44c1628c6842c398
| 3,648,504
|
import numpy
def wrap_array_func(func):
"""
Returns a version of the function func() that works even when
func() is given a NumPy array that contains numbers with
uncertainties.
func() is supposed to return a NumPy array.
This wrapper is similar to uncertainties.wrap(), except that it
handles an array argument instead of float arguments.
func -- version that takes and returns a single NumPy array.
"""
@uncertainties.set_doc("""\
Version of %s(...) that works even when its first argument is a NumPy
array that contains numbers with uncertainties.
Warning: elements of the first argument array that are not
AffineScalarFunc objects must not depend on uncertainties.Variable
objects in any way. Otherwise, the dependence of the result in
uncertainties.Variable objects will be incorrect.
Original documentation:
%s""" % (func.__name__, func.__doc__))
def wrapped_func(arr, *args):
# Nominal value:
arr_nominal_value = nominal_values(arr)
func_nominal_value = func(arr_nominal_value, *args)
# The algorithm consists in numerically calculating the derivatives
# of func:
# Variables on which the array depends are collected:
variables = set()
for element in arr.flat:
# floats, etc. might be present
if isinstance(element, uncertainties.AffineScalarFunc):
variables |= set(element.derivatives.keys())
# If the matrix has no variables, then the function value can be
# directly returned:
if not variables:
return func_nominal_value
# Calculation of the derivatives of each element with respect
# to the variables. Each element must be independent of the
# others. The derivatives have the same shape as the output
# array (which might differ from the shape of the input array,
# in the case of the pseudo-inverse).
derivatives = numpy.vectorize(lambda _: {})(func_nominal_value)
for var in variables:
# A basic assumption of this package is that the user
# guarantees that uncertainties cover a zone where
# evaluated functions are linear enough. Thus, numerical
# estimates of the derivative should be good over the
# standard deviation interval. This is true for the
# common case of a non-zero standard deviation of var. If
# the standard deviation of var is zero, then var has no
# impact on the uncertainty of the function func being
# calculated: an incorrect derivative has no impact. One
# scenario can give incorrect results, however, but it
# should be extremely uncommon: the user defines a
# variable x with 0 standard deviation, sets y = func(x)
# through this routine, changes the standard deviation of
# x, and prints y; in this case, the uncertainty on y
# might be incorrect, because this program had no idea of
# the scale on which func() is linear, when it calculated
# the numerical derivative.
# The standard deviation might be numerically too small
# for the evaluation of the derivative, though: we set the
# minimum variable shift.
shift_var = max(var._std_dev/1e5, 1e-8*abs(var._nominal_value))
# An exceptional case is that of var being exactly zero.
# In this case, an arbitrary shift is used for the
# numerical calculation of the derivative. The resulting
# derivative value might be quite incorrect, but this does
# not matter as long as the uncertainty of var remains 0,
# since it is, in this case, a constant.
if not shift_var:
shift_var = 1e-8
# Shift of all the elements of arr when var changes by shift_var:
shift_arr = array_derivative(arr, var)*shift_var
# Origin value of array arr when var is shifted by shift_var:
shifted_arr_values = arr_nominal_value + shift_arr
func_shifted = func(shifted_arr_values, *args)
numerical_deriv = (func_shifted-func_nominal_value)/shift_var
# Update of the list of variables and associated
# derivatives, for each element:
for (derivative_dict, derivative_value) in (
zip(derivatives.flat, numerical_deriv.flat)):
if derivative_value:
derivative_dict[var] = derivative_value
# numbers with uncertainties are built from the result:
return numpy.vectorize(uncertainties.AffineScalarFunc)(
func_nominal_value, derivatives)
# It is easier to work with wrapped_func, which represents a
# wrapped version of 'func', when it bears the same name as
# 'func' (the name is used by repr(wrapped_func)).
wrapped_func.__name__ = func.__name__
return wrapped_func
|
7cbd33599b62df096db3ce84968cc13f24512fc0
| 3,648,505
|
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collides with base or pipes."""
pi = player['index']
player['w'] = fImages['player'][0].get_width()
player['h'] = fImages['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= nBaseY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = fImages['pipe'][0].get_width()
pipeH = fImages['pipe'][0].get_height()
for uPipe in upperPipes:
# pipe rect
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
# player and pipe hitmasks
pHitMask = fHitMask['player'][pi]
uHitmask = fHitMask['pipe'][0]
# if bird collided with pipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
if uCollide:
# for fury mode we want to break the pipe so we
# must return which pipe is colliding (lower or upper)
if bFuryMode:
return [True, False, True, uPipe]
# normal mode
return [True, False]
for lPipe in lowerPipes:
# pipe rect
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and pipe hitmasks
pHitMask = fHitMask['player'][pi]
lHitmask = fHitMask['pipe'][0]
# if bird collided with pipe
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if lCollide:
# for fury mode we want to break the pipe so we
# must return which pipe is colliding (lower or upper)
if bFuryMode:
return [True, False, False, lPipe]
# normal mode
return [True, False]
return [False, False]
|
e638f0ae40610fc0c4097998e8fa3df0dc6a5d56
| 3,648,506
|
import os
def alter_subprocess_kwargs_by_platform(**kwargs):
"""
Given a dict, populate kwargs to create a generally
useful default setup for running subprocess processes
on different platforms. For example, `close_fds` is
set on posix and creation of a new console window is
disabled on Windows.
This function will alter the given kwargs and return
the modified dict.
"""
kwargs.setdefault('close_fds', os.name == 'posix')
if os.name == 'nt':
CONSOLE_CREATION_FLAGS = 0 # Default value
# See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863%28v=vs.85%29.aspx
CREATE_NO_WINDOW = 0x08000000
# We "or" them together
CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW
kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS)
return kwargs
|
93ada5c681c535b45fc5c321ab4b27b49587a106
| 3,648,507
|
def convert_gwp(context, qty, to):
"""Helper for :meth:`convert_unit` to perform GWP conversions."""
# Remove a leading 'gwp_' to produce the metric name
metric = context.split('gwp_')[1] if context else context
# Extract the species from *qty* and *to*, allowing supported aliases
species_from, units_from = extract_species(qty[1])
species_to, units_to = extract_species(to)
try:
# Convert using a (magnitude, unit) tuple with only units, and explicit
# input and output units
result = iam_units.convert_gwp(metric, (qty[0], units_from),
species_from, species_to)
except (AttributeError, ValueError):
# Missing *metric*, or *species_to* contains invalid units. pyam
# promises UndefinedUnitError in these cases. Use a subclass (above) to
# add a usage hint.
raise UndefinedUnitError(species_to) from None
except pint.DimensionalityError:
# Provide an exception with the user's inputs
raise pint.DimensionalityError(qty[1], to) from None
# Other exceptions are not caught and will pass up through convert_unit()
if units_to:
# Also convert the units
result = result.to(units_to)
else:
# *to* was only a species name. Provide units based on input and the
# output species name.
to = iam_units.format_mass(result, species_to, spec=':~')
return result, to
|
23d47e3b93f1ed694fbb5187433af5c8caa72dc8
| 3,648,508
|
from AppKit import NSSearchPathForDirectoriesInDomains
import sys
import os
def appdataPath(appname):
"""
Returns the generic location for storing application data in a cross
platform way.
:return <str>
"""
# determine Mac OS appdata location
if sys.platform == 'darwin':
# credit: MHL
try:
# NSApplicationSupportDirectory = 14
# NSUserDomainMask = 1
# True for expanding the tilde into a fully qualified path
basepath = NSSearchPathForDirectoriesInDomains(14, 1, True)
return os.path.join(basepath[0], appname)
except (ImportError, AttributeError, IndexError):
basepath = os.path.expanduser("~/Library/Application Support")
return os.path.join(basepath, appname)
# determine Windows OS appdata location
elif sys.platform == 'win32':
return os.path.join(os.environ.get('APPDATA'), appname)
# determine Linux OS appdata location
else:
return os.path.expanduser(os.path.join('~', '.' + appname))
|
d4d26890ca1fa607cbb09bd4838e3513d19c6af9
| 3,648,509
|
import random
import numpy as np
def getAction(board, policy, action_set):
"""
return action for policy, chooses max from classifier output
"""
# if policy doesn't exist yet, choose action randomly, else get from policy model
if policy == None:
valid_actions = [i for i in action_set if i[0] > -1]
if len(valid_actions) == 0:
return (-1,-1,0)
rand_i = random.randint(0, len(valid_actions)-1)
# du_policy = [-12.63, 6.60, -9.22,-19.77,-13.08,-10.49,-1.61, -24.04]
# action = nextInitialMove(du_policy, board)
action = valid_actions[rand_i]
else:
piece = [0]*7 # one hot encode piece
piece[board.currentShape.shape -1] = 1
tot_features = np.append(board.getFeatures(), [piece])
action_scores = policy.predict([tot_features])
best_scores = np.argwhere(action_scores == np.amax(action_scores)).flatten().tolist()
max_score = np.random.choice(best_scores)
action = action_set[max_score]
return action
|
fddb9160f0571dfaf50f945c05d5dbb176465180
| 3,648,510
|
def f_assert_must_between(value_list, args):
"""
Check that every element of the list is an int/float (or numeric string) within the range given by args.
:param value_list: list of values to check
:param args: two-element range list [lower, upper]
:return: the original list, or raise FeatureProcessError
example:
:value_list [2, 2, 3]
:args [1,3]
:value_list ['-2', '-3', 3]
:args ['-5',3]
"""
assert len(args) == 2
for value in value_list:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()
and float(args[0]) <= float(value) <= float(args[1])):
raise FeatureProcessError('%s f_assert_must_between %s Error' % (value_list, args))
return value_list
|
6e082f3df39509f0823862497249a06080bd7649
| 3,648,511
|
import numpy as np
from scipy.stats import zscore
from scipy.ndimage import label
def annotate_muscle_zscore(raw, threshold=4, ch_type=None, min_length_good=0.1,
filter_freq=(110, 140), n_jobs=1, verbose=None):
"""Create annotations for segments that likely contain muscle artifacts.
Detects data segments containing activity in the frequency range given by
``filter_freq`` whose envelope magnitude exceeds the specified z-score
threshold, when summed across channels and divided by ``sqrt(n_channels)``.
False-positive transient peaks are prevented by low-pass filtering the
resulting z-score time series at 4 Hz. Only operates on a single channel
type, if ``ch_type`` is ``None`` it will select the first type in the list
``mag``, ``grad``, ``eeg``.
See :footcite:`Muthukumaraswamy2013` for background on choosing
``filter_freq`` and ``threshold``.
Parameters
----------
raw : instance of Raw
Data to estimate segments with muscle artifacts.
threshold : float
The threshold in z-scores for marking segments as containing muscle
activity artifacts.
ch_type : 'mag' | 'grad' | 'eeg' | None
The type of sensors to use. If ``None`` it will take the first type in
``mag``, ``grad``, ``eeg``.
min_length_good : float | None
The shortest allowed duration of "good data" (in seconds) between
adjacent annotations; shorter segments will be incorporated into the
surrounding annotations. ``None`` is equivalent to ``0``.
Default is ``0.1``.
filter_freq : array-like, shape (2,)
The lower and upper frequencies of the band-pass filter.
Default is ``(110, 140)``.
%(n_jobs)s
%(verbose)s
Returns
-------
annot : mne.Annotations
Periods with muscle artifacts annotated as BAD_muscle.
scores_muscle : array
Z-score values averaged across channels for each sample.
References
----------
.. footbibliography::
"""
raw_copy = raw.copy()
if ch_type is None:
raw_ch_type = raw_copy.get_channel_types()
if 'mag' in raw_ch_type:
ch_type = 'mag'
elif 'grad' in raw_ch_type:
ch_type = 'grad'
elif 'eeg' in raw_ch_type:
ch_type = 'eeg'
else:
raise ValueError('No M/EEG channel types found, please specify a'
' ch_type or provide M/EEG sensor data')
logger.info('Using %s sensors for muscle artifact detection'
% (ch_type))
if ch_type in ('mag', 'grad'):
raw_copy.pick_types(meg=ch_type, ref_meg=False)
else:
ch_type = {'meg': False, ch_type: True}
raw_copy.pick_types(**ch_type)
raw_copy.filter(filter_freq[0], filter_freq[1], fir_design='firwin',
pad="reflect_limited", n_jobs=n_jobs)
raw_copy.apply_hilbert(envelope=True, n_jobs=n_jobs)
data = raw_copy.get_data(reject_by_annotation="NaN")
nan_mask = ~np.isnan(data[0])
sfreq = raw_copy.info['sfreq']
art_scores = zscore(data[:, nan_mask], axis=1)
art_scores = art_scores.sum(axis=0) / np.sqrt(art_scores.shape[0])
art_scores = filter_data(art_scores, sfreq, None, 4)
scores_muscle = np.zeros(data.shape[1])
scores_muscle[nan_mask] = art_scores
art_mask = scores_muscle > threshold
# return muscle scores with NaNs
scores_muscle[~nan_mask] = np.nan
# remove artifact free periods shorter than min_length_good
min_length_good = 0 if min_length_good is None else min_length_good
min_samps = min_length_good * sfreq
comps, num_comps = label(art_mask == 0)
for com in range(1, num_comps + 1):
l_idx = np.nonzero(comps == com)[0]
if len(l_idx) < min_samps:
art_mask[l_idx] = True
annot = _annotations_from_mask(raw_copy.times,
art_mask, 'BAD_muscle',
orig_time=raw.info['meas_date'])
_adjust_onset_meas_date(annot, raw)
return annot, scores_muscle
|
a09b2b9098c7dfc48b29548691e3c6c524a6b6bf
| 3,648,512
|
import numpy as np
def circ_dist2(a, b):
"""Signed angular distance between two angles, wrapped to (-pi, pi]
"""
phi = np.e**(1j*a) / np.e**(1j*b)
ang_dist = np.arctan2(phi.imag, phi.real)
return ang_dist
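A minimal check of the wrap-around behaviour (numpy imported as np, as above): the signed distance from 0.9*pi to -0.9*pi comes back as -0.2*pi rather than 1.8*pi.
print(circ_dist2(0.9 * np.pi, -0.9 * np.pi))  # -> approx. -0.6283 (-0.2*pi)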
|
db60caace70f23c656c4e97b94145a246c6b2995
| 3,648,513
|
import tensorflow as tf
def hinge_loss(positive_scores, negative_scores, margin=1.0):
"""
Pairwise hinge loss [1]:
loss(p, n) = \sum_i [\gamma - p_i + n_i]_+
[1] http://yann.lecun.com/exdb/publis/pdf/lecun-06.pdf
:param positive_scores: (N,) Tensor containing scores of positive examples.
:param negative_scores: (N,) Tensor containing scores of negative examples.
:param margin: Margin.
:return: Loss value.
"""
hinge_losses = tf.nn.relu(margin - positive_scores + negative_scores)
loss = tf.reduce_sum(hinge_losses)
return loss
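A small sanity check, assuming TensorFlow 2 with eager execution (the function itself only relies on tf.nn.relu and tf.reduce_sum, which exist in both TF 1.x and 2.x). Only the second pair violates the margin, so the total loss is 1.5.
pos = tf.constant([2.0, 0.5])
neg = tf.constant([1.0, 1.0])
print(hinge_loss(pos, neg))  # -> 1.5, since relu(1-2+1)=0 and relu(1-0.5+1)=1.5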
|
daa698f012c30c8f99ba1ce08cbb73226251e3c1
| 3,648,514
|
def build(req):
"""Builder for this format.
Args:
req: flask request
Returns:
Json containing the creative data
"""
errors = []
v = {}
tdir = "/tmp/" + f.get_tmp_file_name()
index = get_html()
ext = f.get_ext(req.files["videofile"].filename)
if ext != "mp4":
return {"errors": ["Only mp4 files allowed"]}
f.save_file(req.files["videofile"], tdir + "/video.mp4")
v["backgroundColor"] = f.get_param("background_color")
v["autoclose"] = str(f.get_int_param("autoclose"))
return {"errors": errors, "dir": tdir, "index": index, "vars": v}
|
33ad1e003533407626cd3ccdd52e7f6c414e6470
| 3,648,515
|
import sys
import numpy as np
import scipy.spatial.distance
def search(query, data, metric='euclidean', verbose=True):
"""
do search, return ranked list according to distance
metric: hamming/euclidean
query: one query per row
data: one data point per row
"""
#calc dist of query and each data point
if metric not in ['euclidean', 'hamming']:
print('metric must be one of (euclidean, hamming)')
sys.exit(0)
#b=time.clock()
dist=scipy.spatial.distance.cdist(query,data,metric)
sorted_idx=np.argsort(dist,axis=1)
#e=time.clock()
if verbose:
#calc avg dist to nearest 200 neighbors
nearpoints=sorted_idx[:,0:200]
d=[np.mean(dist[i][nearpoints[i]]) for i in range(nearpoints.shape[0])]
sys.stdout.write('%.4f, '% np.mean(d))
#print 'search time %.4f' % (e-b)
return sorted_idx
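An illustrative call (numpy imported as np, as above): with one 2-D query and three data points, each returned row ranks the data indices by increasing Euclidean distance.
q = np.array([[0.0, 0.0]])
d = np.array([[1.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
print(search(q, d, verbose=False))  # -> [[1 0 2]]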
|
576471dfbe1dc0a2ae80235faf36d42d4b3a7f8a
| 3,648,516
|
def create_circle_widget(canvas: Canvas, x: int, y: int, color: str, circle_size: int):
"""create a centered circle on cell (x, y)"""
# in the canvas the 1st axis is horizontal and the 2nd is vertical
# we want the opposite so we flip x and y for the canvas
# to create an ellipsis, we give (x0, y0) and (x1, y1) that define the containing rectangle
pad = (CELL_SIZE - circle_size) / 2
i0 = 5 + y * CELL_SIZE + pad + 1
j0 = 5 + x * CELL_SIZE + pad + 1
i1 = 5 + (y + 1) * CELL_SIZE - pad
j1 = 5 + (x + 1) * CELL_SIZE - pad
return canvas.create_oval(i0, j0, i1, j1, fill=color, outline="")
|
b048b7d9c262c40a93cfef489468ad709a1e3883
| 3,648,517
|
def _format_program_counter_relative(state):
"""Program Counter Relative"""
program_counter = state.program_counter
operand = state.current_operand
if operand & 0x80 == 0x00:
near_addr = (program_counter + operand) & 0xFFFF
else:
near_addr = (program_counter - (0x100 - operand)) & 0xFFFF
return '${:04X}'.format(near_addr)
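A worked example with a hypothetical stand-in for the emulator's state object (SimpleNamespace is used here purely for illustration): an operand of 0xF0 is a signed 8-bit offset of -0x10, so from program counter 0x1000 the branch target formats as $0FF0.
from types import SimpleNamespace
state = SimpleNamespace(program_counter=0x1000, current_operand=0xF0)  # hypothetical state object
print(_format_program_counter_relative(state))  # -> '$0FF0'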
|
74f13e9230a6c116413b373b92e36bd884a906e7
| 3,648,518
|
def compile_program(
program: PyTEAL, mode: Mode = Mode.Application, version: int = 5
) -> bytes:
"""Compiles a PyTEAL smart contract program to the TEAL binary code.
Parameters
----------
program
A function which generates a PyTEAL expression, representing an Algorand program.
mode
The mode with which to compile the supplied PyTEAL program.
version
The version with which to compile the supplied PyTEAL program.
Returns
-------
bytes
The TEAL compiled binary code.
"""
source = compileTeal(program(), mode=mode, version=version)
return _compile_source(source)
|
50e9b4263a0622dfbe427c741ddfba2ff4007089
| 3,648,519
|
def fetch_url(url):
""" Fetches a URL and returns contents - use opener to support HTTPS. """
# Fetch and parse
logger.debug(u'Fetching %s', url)
# Use urllib2 directly for enabled SSL support (LXML doesn't by default)
timeout = 30
try:
opener = urllib2.urlopen(url, None, timeout)
# Fetch HTTP data in one batch, as handling the 'file-like' object to
# lxml results in thread-locking behaviour.
htmldata = opener.read()
except (urllib2.URLError, urllib2.HTTPError):
# These types of errors are non-fatal - but *should* be logged.
logger.exception(u'HTTP Error for %s, returning empty string.',
url
)
return None
return htmldata
|
d6603fda5917423cb99c619810c8161a7c2885a1
| 3,648,520
|
import tensorflow as tf
def predict(yolo_outputs, image_shape, anchors, class_names, obj_threshold, nms_threshold, max_boxes = 1000):
"""
Process the results of the Yolo inference to retrieve the detected bounding boxes,
the corresponding class label, and the confidence score associated.
The threshold value 'obj_threshold' serves to discard low confidence predictions.
The 'nms_threshold' value is used to discard duplicate boxes for a same object (IoU metric).
"""
# Init
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
total_boxes = []
total_box_scores = []
input_shape = tf.shape(yolo_outputs[0])[1 : 3] * 32
# Process output tensors
for i in range(len(yolo_outputs)):
# Get bboxes and associated scores
detected_boxes, box_scores = boxes_and_scores(yolo_outputs[i], anchors[anchor_mask[i]], len(class_names), input_shape, image_shape)
# Append bboxes and level of confidence to list
total_boxes.append(detected_boxes)
total_box_scores.append(box_scores)
# Concatenate results
total_boxes = tf.concat(total_boxes, axis=0)
total_box_scores = tf.concat(total_box_scores, axis=0)
#print('------------------------------------')
#print('Boxe scores', box_scores)
# Mask to filter out low confidence detections (use the concatenated scores, not the last per-scale tensor)
mask = total_box_scores >= obj_threshold
# Set boxes limit
max_boxes_tensor = tf.constant(max_boxes, dtype = tf.int32)
boxes_ = []
scores_ = []
classes_ = []
items_ = []
for c in range(len(class_names)):
# Get boxes labels
class_boxes = tf.boolean_mask(total_boxes, mask[:, c])
# Get associated score
class_box_scores = tf.boolean_mask(total_box_scores[:, c], mask[:, c])
# Concatenate label and score
item = [class_boxes, class_box_scores]
# Filter out duplicates when multiple boxes are predicted for a same object
nms_index = tf.image.non_max_suppression(class_boxes, class_box_scores, max_boxes_tensor, iou_threshold = nms_threshold)
# Remove the duplicates from the list of classes and scores
class_boxes = tf.gather(class_boxes, nms_index)
class_box_scores = tf.gather(class_box_scores, nms_index)
# Multiply score by class type
classes = tf.ones_like(class_box_scores, 'int32') * c
# Append results to lists
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
# Concatenate results
boxes_ = tf.concat(boxes_, axis = 0)
scores_ = tf.concat(scores_, axis = 0)
classes_ = tf.concat(classes_, axis = 0)
return boxes_, scores_, classes_
|
df90a5baed671e316e03c0a621ce1740efc7a833
| 3,648,521
|
import json
def get_event_details(entry, workday_user, demisto_user, days_before_hire_to_sync, days_before_hire_to_enable_ad,
deactivation_date_field, display_name_to_user_profile, email_to_user_profile,
employee_id_to_user_profile, source_priority):
"""
This function detects the event type and creates a dictionary which holds the event details.
If the event should not be created, None is returned.
Args:
entry: The employee's report entry.
workday_user: Workday user in XSOAR format.
demisto_user: The user profile in XSOAR.
deactivation_date_field: Deactivation date field - "lastdayofwork" or "terminationdate".
days_before_hire_to_sync: Number of days before hire date to sync hires, -1 if should sync instantly.
days_before_hire_to_enable_ad: Number of days before hire date to enable Active Directory account,
-1 if should sync instantly.
display_name_to_user_profile: A dictionary that maps display names to user profile indicators in XSOAR.
email_to_user_profile: A dictionary that maps email addresses to user profile indicators in XSOAR.
employee_id_to_user_profile: A dictionary that maps employee ids to user profile indicators in XSOAR.
source_priority: The source priority number.
Returns:
event: The event details.
"""
user_email = workday_user.get(EMAIL_ADDRESS_FIELD)
changed_fields = get_profile_changed_fields_str(demisto_user, workday_user)
demisto.debug(f'{changed_fields=}') # type: ignore
if not has_reached_threshold_date(days_before_hire_to_sync, workday_user) \
or new_hire_email_already_taken(workday_user, demisto_user, email_to_user_profile) \
or is_report_missing_required_user_data(workday_user) \
or not is_valid_source_of_truth(demisto_user, source_priority) \
or is_event_processed(demisto_user):
return None
if is_new_hire_event(demisto_user, workday_user, deactivation_date_field):
event_type = NEW_HIRE_EVENT_TYPE
event_details = 'The user has been hired.'
elif is_ad_activation_event(demisto_user, workday_user, days_before_hire_to_enable_ad):
event_type = ACTIVATE_AD_EVENT_TYPE
event_details = 'Active Directory user account was enabled.'
elif is_ad_deactivation_event(demisto_user, workday_user, days_before_hire_to_enable_ad, source_priority):
event_type = DEACTIVATE_AD_EVENT_TYPE
event_details = 'Active Directory user account was disabled due to hire date postponement.'
elif is_rehire_event(demisto_user, workday_user, changed_fields):
event_type = REHIRE_USER_EVENT_TYPE
event_details = 'The user has been rehired.'
elif is_termination_event(workday_user, demisto_user, deactivation_date_field):
event_type = TERMINATE_USER_EVENT_TYPE
event_details = 'The user has been terminated.'
elif is_update_event(workday_user, changed_fields):
event_type = UPDATE_USER_EVENT_TYPE
event_details = f'The user has been updated:\n{changed_fields}'
workday_user[OLD_USER_DATA_FIELD] = demisto_user
if demisto_user.get(SOURCE_PRIORITY_FIELD) != source_priority:
workday_user[CONVERSION_HIRE_FIELD] = True
event_details = f'A conversion hire was detected:\n{changed_fields}'
else:
demisto.debug(f'Could not detect changes in report for user with email address {user_email} - skipping.')
return None
if is_tufe_user(demisto_user) and event_type != REHIRE_USER_EVENT_TYPE:
return None
if is_display_name_already_taken(demisto_user, workday_user, display_name_to_user_profile) \
and event_type in [NEW_HIRE_EVENT_TYPE, REHIRE_USER_EVENT_TYPE, UPDATE_USER_EVENT_TYPE]:
event_details = f'Detected an "{event_type}" event, but display name already exists. Please review.'
if changed_fields:
event_details += f'\n{changed_fields}'
event_type = DEFAULT_INCIDENT_TYPE
entry[USER_PROFILE_INC_FIELD] = workday_user
return {
'name': user_email,
'rawJSON': json.dumps(entry),
'type': event_type,
'details': event_details
}
|
dd8095f81a0df7db9196accbcdef2f72ef92a39f
| 3,648,522
|
def ae_model(inputs, train=True, norm=True, **kwargs):
"""
AlexNet model definition as defined in the paper:
https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf
You will need to EDIT this function. Please put your AlexNet implementation here.
Note:
1.) inputs['images'] is a [BATCH_SIZE x HEIGHT x WIDTH x CHANNELS] array coming
from the data provider.
2.) You will need to return 'output' which is a dictionary where
- output['pred'] is set to the output of your model
- output['conv1'] is set to the output of the conv1 layer
- output['conv1_kernel'] is set to conv1 kernels
- output['conv2'] is set to the output of the conv2 layer
- output['conv2_kernel'] is set to conv2 kernels
- and so on...
The output dictionary should include the following keys for AlexNet:
['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'pool1',
'pool2', 'pool5', 'fc6', 'fc7', 'fc8']
as well as the respective ['*_kernel'] keys for the kernels
3.) Set your variable scopes to the name of the respective layers, e.g.
with tf.variable_scope('conv1'):
outputs['conv1'] = ...
outputs['pool1'] = ...
and
with tf.variable_scope('fc6'):
outputs['fc6'] = ...
and so on.
4.) Use tf.get_variable() to create variables, while setting name='weights'
for each kernel, and name='bias' for each bias for all conv and fc layers.
For the pool layers name='pool'.
These steps are necessary to correctly load the pretrained alexnet model
from the database for the second part of the assignment.
"""
# propagate input targets
outputs = inputs
# dropout = .5 if train else None
input_to_network = inputs['images']
outputs['input'] = input_to_network
with tf.variable_scope('conv'):
outputs['relu'], outputs['conv_kernel'] = get_conv(input_to_network,[7,7,3,64],16)
with tf.variable_scope('deconv'):
outputs['deconv'] = get_deconv(outputs['relu'],[12,12,3,64],12,input_to_network.shape)
# shape = input_to_network.get_shape().as_list()
# stride = 16
# hidden_size = 2
# deconv_size = 12
# ### YOUR CODE HERE
# outputs['input'] = input_to_network
# conv_layer = K.layers.Conv2D(64,7,strides=(stride,stride),
# padding='same',
# kernel_initializer='glorot_normal')
# outputs['conv_kernel'] = conv_layer
# outputs['conv'] = conv_layer(input_to_network)
# outputs['relu'] = K.layers.Activation('relu')(outputs['conv'])
# outputs['deconv'] = K.layers.Conv2DTranspose(3,deconv_size,
# deconv_size,padding='valid',
# kernel_initializer='glorot_normal')(outputs['relu'])
### END OF YOUR CODE
for k in ['deconv']:
assert k in outputs, '%s was not found in outputs' % k
return outputs, {}
|
659908f6fbfb401941984668634382c6d30a8124
| 3,648,523
|
def filter_by_country(data, country=DEFAULT_COUNTRY):
"""
Filter provided data by country (defaults to Czechia).
data: pandas.DataFrame
country: str
"""
# Filter data by COUNTRY
return data[data[COLUMN_FILTER] == country]
|
bbf9eacd74a6032f1298cd7313d5c8233ec4a8ec
| 3,648,524
|
import requests
import os
def get_instance_id() -> str:
"""Returns the AWS instance id where this is running or "local"."""
global INSTANCE_ID
if INSTANCE_ID is None:
if get_env_variable("RUNNING_IN_CLOUD") == "True":
@retry(stop_max_attempt_number=3)
def retrieve_instance_id():
return requests.get(os.path.join(METADATA_URL, "instance-id")).text
INSTANCE_ID = retrieve_instance_id()
else:
INSTANCE_ID = "local"
return INSTANCE_ID
|
a73c49e114dac360151a8b571eadea13c8cb35a6
| 3,648,525
|
def scans_from_csvs(*inps, names=None):
"""
Read from csvs.
:param inps: file names of the csvs
:param names: names of the Scans
:return: list of Scans
"""
ns, temp_vals, heat_flow_vals = read_csvs(inps)
names = ns if names is None else names
return [Scan(*vals) for vals in zip(temp_vals, heat_flow_vals, names)]
|
6acddf330e10793dab6b76ec6a1edb1d2fd0660d
| 3,648,526
|
def part_b(puzzle_input):
"""
Calculate the answer for part_b.
Args:
puzzle_input (list): Formatted as the provided input from the website.
Returns:
string: The answer for part_b.
"""
return str(collect_letters(puzzle_input)[1])
|
b82597c610e8a7d03ea68ddad392385636b0e2f3
| 3,648,527
|
from sklearn import preprocessing
def data_encoder(data):
"""
Encode all categorical values in the dataframe into numeric values.
@param data: the original dataframe
@return data: the same dataframe with all categorical variables encoded
"""
le = preprocessing.LabelEncoder()
cols = data.columns
numcols = data._get_numeric_data().columns
catecols = list(set(cols) - set(numcols))
data[catecols] = data[catecols].astype(str).apply(le.fit_transform)
return data
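A minimal illustration, assuming pandas and scikit-learn are available (preprocessing is imported above): the string column is label-encoded in place while the numeric column is left untouched.
import pandas as pd
df = pd.DataFrame({'size': ['S', 'M', 'S'], 'price': [1.0, 2.0, 3.0]})
print(data_encoder(df))
# 'size' becomes integer codes (here M -> 0, S -> 1); 'price' keeps its original values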
|
2a2177891e1930311661f6549dbd33f329704fec
| 3,648,528
|
def _search_settings(method_settings_keys, settings):
"""
We maintain a dictionary of dimensionality reduction methods
in dim_settings_keys where each key (method) stores another
dictionary (md) holding that method's settings (parameters).
The keys of md are component ids and the values are parameter
names that will be passed to dim_reduce.
For example, dim_settings_keys['dim-PCA'] holds a dictionary
dim_pca_settings_keys = {
'dim-PCA-n-components': 'n_components',
'dim-PCA-whiten': 'whiten',
'dim-PCA-solver': 'svd_solver',
'dim-PCA-random-state': 'random_state'
}
where the keys (dim-PCA-key) is the widget id and the value is the
parameter name to pass to sklearn's PCA.
Parameters
__________
method_settings_keys: dict
Dictionary holding setting ids and parameter names.
settings: tuple of list of dicts of ...
Holds all children in the method-settings elements. This
is a mixture of lists, tuples, and dicts. We recursively search
this element to find the children with id's as determined by
dim_pca_settings_keys[dim_method]. By doing this we avoid having
to write new Input elements into our callbacks every time we add
a new setting. All that needs to be done is add the setting's
id into the settings_keys dict and it will be parsed automatically.
"""
kwargs = {}
for key in method_settings_keys:
child = next(_recur_search(settings, key))
# if there exists a component with 'key' as its 'id'
# then child should never be None. 'value' may be missing
# if not manually specified when constructing the widget.
if child is None or 'value' not in child:
raise InternalError("'value' key not found in child.")
kwargs[method_settings_keys[key]] = child['value']
return kwargs
|
20a8a0e24df1dc572dfbe541f1439bc6245f6170
| 3,648,529
|
def svn_opt_resolve_revisions(*args):
"""
svn_opt_resolve_revisions(svn_opt_revision_t peg_rev, svn_opt_revision_t op_rev,
svn_boolean_t is_url, svn_boolean_t notice_local_mods,
apr_pool_t pool) -> svn_error_t
"""
return _core.svn_opt_resolve_revisions(*args)
|
5f011401c4afc044f0fa877407f7c7a3da56f576
| 3,648,530
|
from typing import Union
from pathlib import Path
from typing import Any
def write_midi(
path: Union[str, Path],
music: "Music",
backend: str = "mido",
**kwargs: Any
):
"""Write a Music object to a MIDI file.
Parameters
----------
path : str or Path
Path to write the MIDI file.
music : :class:`muspy.Music`
Music object to write.
backend: {'mido', 'pretty_midi'}
Backend to use. Defaults to 'mido'.
"""
if backend == "mido":
return write_midi_mido(path, music, **kwargs)
if backend == "pretty_midi":
return write_midi_pretty_midi(path, music)
raise ValueError("`backend` must be one of 'mido' and 'pretty_midi'.")
|
963e5aafffbc348df17861b615c2e839c170adce
| 3,648,531
|
import string
from re import VERBOSE
def find_star_column(file, column_type, header_length) :
""" For an input .STAR file, search through the header and find the column numbers assigned to a given column_type (e.g. 'rlnMicrographName', ...)
"""
with open(file, 'r') as f :
line_num = 0
for line in f :
line_num += 1
# extract the column number for the requested column type (handle multi-digit values such as '#12')
if column_type in line :
column_num = int(''.join(c for c in line.split()[1] if c in string.digits))
# search header and no further to find setup values
if line_num >= header_length :
if VERBOSE:
# print("Read though header (%s lines total)" % header_length)
print("Column value for %s is %d" % (column_type, column_num))
return column_num
|
832a63d2084b6f007c0b58fbafbe037d0a81ab38
| 3,648,532
|
import cv2
def recalculate_bb(df, customization_dict, image_dir):
"""After resizing images, bb coordinates are recalculated.
Args:
df (Dataframe): A df for image info.
customization_dict (dict): Resize dict.
image_dir (list): Image path list
Returns:
Dataframe: Updated dataframe.
"""
img = cv2.imread(image_dir[0])
h, w, _ = img.shape
new_width = customization_dict['width']
new_height = customization_dict['height']
w_ratio = new_width/w
h_ratio = new_height/h
df['x_min'] = df['x_min']*w_ratio
df['x_max'] = df['x_max']*w_ratio
df['y_min'] = df['y_min']*h_ratio
df['y_max'] = df['y_max']*h_ratio
df.x_min = df.x_min.astype("int16")
df.x_max = df.x_max.astype("int16")
df.y_min = df.y_min.astype("int16")
df.y_max = df.y_max.astype("int16")
return df
|
412149195898e492405fe58aef2d8c8ce360cef7
| 3,648,533
|
def free_port():
"""Returns a free port on this host
"""
return get_free_port()
|
94765cdb1a6e502c9ad650956754b3eda7f1b060
| 3,648,534
|
def justify_to_box(
boxstart: float,
boxsize: float,
itemsize: float,
just: float = 0.0) -> float:
"""
Justifies, similarly, but within a box.
"""
return boxstart + (boxsize - itemsize) * just
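For instance, centring a 20-unit item in a 100-unit box that starts at 10 puts its left edge at 50.0, while just=1.0 pushes it against the far end.
print(justify_to_box(10, 100, 20, just=0.5))  # -> 50.0
print(justify_to_box(10, 100, 20, just=1.0))  # -> 90.0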
|
a644d5a7a6ff88009e66ffa35498d9720b24222c
| 3,648,535
|
import time
from collections import defaultdict
import numpy as np
from scipy.linalg import cho_factor, cho_solve
def newton(oracle, x_0, tolerance=1e-5, max_iter=100,
line_search_options=None, trace=False, display=False):
"""
Newton's optimization method.
Parameters
----------
oracle : BaseSmoothOracle-descendant object
Oracle with .func(), .grad() and .hess() methods implemented for computing
function value, its gradient and Hessian respectively. If the Hessian
returned by the oracle is not positive-definite method stops with message="newton_direction_error"
x_0 : np.array
Starting point for optimization algorithm
tolerance : float
Epsilon value for stopping criterion.
max_iter : int
Maximum number of iterations.
line_search_options : dict, LineSearchTool or None
Dictionary with line search options. See LineSearchTool class for details.
trace : bool
If True, the progress information is appended into history dictionary during training.
Otherwise None is returned instead of history.
display : bool
If True, debug information is displayed during optimization.
Returns
-------
x_star : np.array
The point found by the optimization procedure
message : string
'success' or the description of error:
- 'iterations_exceeded': if after max_iter iterations of the method x_k still doesn't satisfy
the stopping criterion.
- 'newton_direction_error': in case of failure of solving linear system with Hessian matrix (e.g. non-invertible matrix).
- 'computational_error': in case of getting Infinity or None value during the computations.
history : dictionary of lists or None
Dictionary containing the progress information or None if trace=False.
Dictionary has to be organized as follows:
- history['time'] : list of floats, containing time passed from the start of the method
- history['func'] : list of function values f(x_k) on every step of the algorithm
- history['grad_norm'] : list of Euclidean norms ||g(x_k)|| of the gradient on every step of the algorithm
- history['x'] : list of np.arrays, containing the trajectory of the algorithm. ONLY STORE IF x.size <= 2
Example:
--------
>> oracle = QuadraticOracle(np.eye(5), np.arange(5))
>> x_opt, message, history = newton(oracle, np.zeros(5), line_search_options={'method': 'Constant', 'c': 1.0})
>> print('Found optimal point: {}'.format(x_opt))
Found optimal point: [ 0. 1. 2. 3. 4.]
"""
if tolerance <= 0.:
tolerance = 1e-32
history = defaultdict(list) if trace else None
line_search_tool = get_line_search_tool(line_search_options)
x = np.copy(x_0)
start_time = time.time()
grad_norm_0 = np.linalg.norm(oracle.grad(x))
def get_alpha(x, d):
if line_search_tool.is_correct(oracle, x, d, 1.):
return 1.
return line_search_tool.line_search(oracle, x, d)
def has_nans(*args):
for arg in args:
if np.isnan(arg).any() or np.isinf(arg).any():
return True
return False
for _ in range(max_iter):
func = oracle.func(x)
grad = oracle.grad(x)
hess = oracle.hess(x)
grad_norm = np.linalg.norm(grad)
if has_nans(func, grad, hess):
return x, 'computational_error', history
if grad_norm ** 2 <= tolerance * (grad_norm_0 ** 2):
_log_if_needed(display, 'Gradient descent done, x =', x, 'f(x) =', func)
_fill_history_if_needed(history, func, grad_norm, x, start_time)
return x, 'success', history
# could be saddle point, and we can try to use solve_saddle implemented above
try:
c, low = cho_factor(hess)
if has_nans(c, low):
return x, 'computational_error', history
d = cho_solve((c, low), -grad)
except:
_log_if_needed(display, 'Failure of solving linear system with Hessian matrix')
return x, 'newton_direction_error', history
alpha = get_alpha(x, d)
if alpha is None:
return x, 'computational_error', history
_fill_history_if_needed(history, func, grad_norm, x, start_time)
x = x + alpha * d
_fill_history_if_needed(history, oracle.func(x), np.linalg.norm(oracle.grad(x)), x, start_time)
return do_check_result(oracle, x, tolerance, grad_norm_0, history, display)
|
5ecf60c7e26e4bf4c965203ea9dddc5f22104225
| 3,648,536
|
import time
def generate_hostname(domain, hostname):
"""If hostname defined, returns FQDN.
If not, returns FQDN with base32 timestamp.
"""
# Take time.time() - float, then:
# - remove period
# - truncate to 17 digits
# - if it happens that the last digits are 0 (and would not be displayed, so
# string is shorter - pad it with 0.
#
# The result ensures that timestamp is 17 char length and is increasing.
timestamp = str(time.time()).replace('.', '')[:17].ljust(17, '0')
b32time = aws.int2str(number=int(timestamp), base=32)
if hostname[-1] == '-':
hostname = '{}{}'.format(hostname, '{time}')
return '{}.{}'.format(hostname.format(time=b32time), domain)
|
54d85cea2b2aa69cc2864b0974852530453d63ca
| 3,648,537
|
import numpy as np
def symmetrize(M):
"""Return symmetrized version of square upper/lower triangular matrix."""
return M + M.T - np.diag(M.diagonal())
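A quick check with an upper-triangular input (numpy imported as np, as above):
M = np.array([[1., 2., 3.], [0., 4., 5.], [0., 0., 6.]])
print(symmetrize(M))  # -> [[1. 2. 3.] [2. 4. 5.] [3. 5. 6.]]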
|
a2f1311aa96d91d5c4992ad21018b07ac5954d1c
| 3,648,538
|
import sys
def process_checksums_get(storage_share, hash_type, url):
"""Run StorageShare get_object_checksum() method to get checksum of file/object.
Run StorageShare get_object_checksum() method to get the requested type of
checksum for file/object whose URL is given.
The client also needs to sp
If the StorageShare does not support the method, the client will get an
error message and the process will exit.
Arguments:
storage_share -- dynafed_storagestats StorageShare object.
hash_type -- string that indicates the type of hash requested.
url -- string containing url to the desired file/object.
Returns:
String containing checksum or 'None'.
"""
try:
_checksum = storage_share.get_object_checksum(hash_type, url)
except dynafed_storagestats.exceptions.ChecksumWarningMissingChecksum as WARN:
_logger.warning("[%s]%s", storage_share.id, WARN.debug)
return None
except AttributeError as ERR:
_logger.error(
"[%s]Checksum GET operation not supported for %s. %s",
storage_share.id,
storage_share.storageprotocol,
ERR
)
print(
"[ERROR][%s]Checksum GET operation not supported %s. %s" % (
storage_share.id,
storage_share.storageprotocol,
ERR
), file=sys.stderr
)
sys.exit(1)
else:
return _checksum
|
addd04d54348958b5c06b377a796fa12a30bd3cd
| 3,648,539
|
import numpy as np
import pandas as pd
def prep_public_water_supply_fraction() -> pd.DataFrame:
"""calculates public water supply deliveries for the commercial and industrial sectors individually
as a ratio to the sum of public water supply deliveries to residential end users and thermoelectric cooling.
Used in calculation of public water supply demand to commercial and industrial sectors.
:return: DataFrame of public water supply ratios for commercial and industrial sector.
"""
# read in data
df = prep_water_use_1995(variables=['FIPS', 'State', 'PS-DelDO', 'PS-DelPT', 'PS-DelCO', 'PS-DelIN'])
df_loc = prep_water_use_2015() # prepared list of 2015 counties with FIPS codes
# calculate ratio of commercial pws to sum of domestic and thermoelectric cooling pws
df['com_pws_fraction'] = np.where((df['PS-DelDO'] + df['PS-DelPT'] <= 0),
np.nan, (df['PS-DelCO'] / (df['PS-DelDO'] + df['PS-DelPT'])))
# calculate ratio of industrial pws to sum of domestic and thermoelectric cooling pws
df["ind_pws_fraction"] = np.where(((df['PS-DelDO'] + df['PS-DelPT']) <= 0),
np.nan, df['PS-DelIN'] / (df['PS-DelDO'] + df['PS-DelPT']))
# reduce dataframe
df = df[['FIPS', 'State', 'com_pws_fraction', 'ind_pws_fraction']]
# fill counties with 0 commercial or industrial public water supply ratios with state averages
df_mean = df.groupby('State', as_index=False).mean()
rename_list = df_mean.columns[1:].to_list()
for col in rename_list:
new_name = f"{col}_state"
df_mean = df_mean.rename(columns={col: new_name})
df_mean_all = pd.merge(df, df_mean, how='left', on=['State'])
# replace counties with consumption fractions of zero with the state average to replace missing data
rep_list = df.columns[2:].to_list()
for col in rep_list:
mean_name = f"{col}_state"
df_mean_all[col].fillna(df_mean_all[mean_name], inplace=True)
# reduce dataframe to required output
df_output = df_mean_all[['FIPS', 'State', 'com_pws_fraction', 'ind_pws_fraction']]
# merge with full list of counties from 2015 water data
df_output = pd.merge(df_loc, df_output, how='left', on=['FIPS', 'State'])
return df_output
|
bb759cfa25add08b0faf0d9232448698d0ae8d53
| 3,648,540
|
def set_matchq_in_constraint(a, cons_index):
"""
Takes care of the case, when a pattern matching has to be done inside a constraint.
"""
lst = []
res = ''
if isinstance(a, list):
if a[0] == 'MatchQ':
s = a
optional = get_default_values(s, {})
r = generate_sympy_from_parsed(s, replace_Int=True)
r, free_symbols = add_wildcards(r, optional=optional)
free_symbols = sorted(set(free_symbols)) # remove common symbols
r = sympify(r, locals={"Or": Function("Or"), "And": Function("And"), "Not":Function("Not")})
pattern = r.args[1].args[0]
cons = r.args[1].args[1]
pattern = rubi_printer(pattern, sympy_integers=True)
pattern = setWC(pattern)
res = ' def _cons_f_{}({}):\n return {}\n'.format(cons_index, ', '.join(free_symbols), cons)
res += ' _cons_{} = CustomConstraint(_cons_f_{})\n'.format(cons_index, cons_index)
res += ' pat = Pattern(UtilityOperator({}, x), _cons_{})\n'.format(pattern, cons_index)
res += ' result_matchq = is_match(UtilityOperator({}, x), pat)'.format(r.args[0])
return "result_matchq", res
else:
for i in a:
if isinstance(i, list):
r = set_matchq_in_constraint(i, cons_index)
lst.append(r[0])
res = r[1]
else:
lst.append(i)
return lst, res
|
c40f15f500736102f4abf17169715387c2f1b91b
| 3,648,541
|
def istype(klass, object):
"""Return whether an object is a member of a given class."""
try: raise object
except klass: return 1
except: return 0
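A brief demonstration; note that the raise/except trick only behaves as intended when `object` is an exception instance.
print(istype(ValueError, ValueError("boom")))  # -> 1
print(istype(ValueError, KeyError("boom")))    # -> 0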
|
bceb83914a9a346c59d90984730dddb808bf0e78
| 3,648,542
|
from typing import Mapping
from typing import Any
def _embed_from_mapping(mapping: Mapping[str, Any], ref: str) -> mapry.Embed:
"""
Parse the embed from the mapping.
All the fields are parsed except the properties, which are parsed
in a separate step.
:param mapping: to be parsed
:param ref: reference to the embeddable structure in the mapry schema
:return: embeddable structure without the properties
"""
return mapry.Embed(
name=mapping['name'], description=mapping['description'], ref=ref)
|
10d3894aa33d41efd47f03335c6e90547ee26e6c
| 3,648,543
|
import pdb
def generate_csv_from_pnl(pnl_file_name):
"""Generate a new .csv file in the same directory as the source .pnl file. Adapted from the export_to_csv function. pnl_file_name must include the path."""
pnlc = alib.read_pnl_from_file(pnl_file_name)
pnl = pnlc[1]
if pnl is None:
print('pnl file {} does not exist!'.format(pnl_file_name))
pdb.set_trace()
csv_file_name = pnl_file_name[:-4] + '.csv'
outf = open(csv_file_name, 'w')
outf.write(alib.pnl_columns + '\n')
f = ','.join(['%g'] * 14) + ',%d,%d,%d'
for d in pnl:
outf.write((f + '\n') % d)
outf.close()
return csv_file_name
|
ceed6ce7d31fe6cb738252b7458c2f404c01135c
| 3,648,544
|
def parse_number(text, allow_to_fail):
"""
Convert text to an integer, raising ASMSyntaxError on failure unless allow_to_fail is set.
:param text: Number as text (decimal, hex or binary)
:param allow_to_fail: If True, return 0 instead of raising on invalid input
:return: Integer value
"""
try:
if text in defines:
return parse_number(defines.get(text), allow_to_fail)
return to_number(text)
except ValueError:
if allow_to_fail:
return 0
else:
raise ASMSyntaxError(f'Invalid number format: {text}')
|
90906b56e8a88fcde9f66defeed48cf12371d375
| 3,648,545
|
import importlib
from importlib import import_module
def pick_vis_func(options: EasyDict):
"""Pick the function to visualize one batch.
:param options:
:return:
"""
importlib.invalidate_caches()
vis_func = getattr(
import_module("utils.vis.{}".format(options.vis.name[0])),
"{}".format(options.vis.name[1])
)
return vis_func
|
4d65f00075c984e5407af09d3680f2195be640bb
| 3,648,546
|
import numpy
def scale_quadrature(quad_func, order, lower, upper, **kwargs):
"""
Scale quadrature rule designed for unit interval to an arbitrary interval.
Args:
quad_func (Callable):
Function that creates quadrature abscissas and weights on the unit
interval.
order (int):
The quadrature order passed to the quadrature function.
lower (float):
The new lower limit for the quadrature function.
upper (float):
The new upper limit for the quadrature function.
kwargs (Any):
Extra keyword arguments passed to `quad_func`.
Returns:
Same as ``quad_func(order, **kwargs)`` except scaled to a new interval.
Examples:
>>> def my_quad(order):
... return (numpy.linspace(0, 1, order+1)[numpy.newaxis],
... 1./numpy.full(order+1, order+2))
>>> my_quad(2)
(array([[0. , 0.5, 1. ]]), array([0.25, 0.25, 0.25]))
>>> scale_quadrature(my_quad, 2, lower=0, upper=2)
(array([[0., 1., 2.]]), array([0.5, 0.5, 0.5]))
>>> scale_quadrature(my_quad, 2, lower=-0.5, upper=0.5)
(array([[-0.5, 0. , 0.5]]), array([0.25, 0.25, 0.25]))
"""
abscissas, weights = quad_func(order=order, **kwargs)
assert numpy.all(abscissas >= 0) and numpy.all(abscissas <= 1)
assert numpy.sum(weights) <= 1+1e-10
assert numpy.sum(weights > 0)
weights = weights*(upper-lower)
abscissas = (abscissas.T*(upper-lower)+lower).T
return abscissas, weights
|
f3854cee12a482bc9c92fe2809a0388dddb422e0
| 3,648,547
|
def ask_for_region(self):
"""ask user for region to select (2-step process)"""
selection = ["BACK"]
choices = []
while "BACK" in selection:
response = questionary.select(
"Select area by (you can go back and combine these choices):",
choices=["continents", "regions", "countries"],
).ask()
selection_items = getattr(self, response)
if response == "regions":
choices = (
[Choice(r) for r in selection_items if "EU" in r]
+ [Separator()]
+ [Choice(r) for r in selection_items if "EU" not in r]
)
else:
choices = [Choice(r) for r in selection_items.keys()]
# preselect previous choices
for choice in choices:
if choice.value in selection:
choice.checked = True
current_selection = questionary.checkbox("Please select", choices=choices).ask()
selection = selection + current_selection
if "BACK" not in current_selection:
selection = clean_results(selection)
print(f"Selection: {clean_results(selection)}")
selection = list(set(clean_results(selection)))
return self._extract_countries(selection)
|
dd7ea9ca33ca8348fba0d36c5661b6fd30c96090
| 3,648,548
|
from numpy import array, argmin
def peakAlign(refw,w):
""" Difference between the maximum peak positions of the signals.
This function returns the difference, in samples, between the peaks position
of the signals. If the reference signal has various peaks, the one
chosen is the peak which is closer to the middle of the signal, and if the
other signal has more than one peak also, the chosen is the one closer to
the reference peak signal.
The first signal introduced is the reference signal.
Parameters
----------
refw: array-like
the input reference signal.
w: array-like
the input signal.
Returns
-------
al: int
the difference between the two events position
Example
-------
>>> peakAlign([5,7,3,20,13,5,7],[5,1,8,4,3,10,3])
1
See also: maxAlign(), minAlign(), peakNegAlign(), infMaxAlign(), infMinAlign()
"""
p_mw = array ( peaks(array(refw),min(refw)) )
p_w = array ( peaks(array(w),min(w)) )
if (len(p_mw)>1):
min_al = argmin(abs( (len(refw)/2) - p_mw)) #to choose the peak closer to the middle of the signal
p_mw=p_mw[min_al]
if (list(p_w) == [] ):
p_w = p_mw
elif (len(p_w)>1):
min_al = argmin(abs(p_w - p_mw)) #to choose the peak closer to the peak of the reference signal
p_w=p_w[min_al]
return int(array(p_mw-p_w))
|
a7497e828008281318dff25b5547e0ab4f8e9a35
| 3,648,549
|
import pandas as pd
def get_games(by_category, n_games):
"""
This function imports the dataframe of most popular games and returns a list of game names
with the length of 'n_games' selected by 'by_category'. Valid options for 'by_category': rank, num_user_ratings
"""
df = pd.read_csv('../data/popular_games_with_image_url.csv', index_col = 0)
if by_category == 'rank':
ascending = True
elif by_category == 'num_user_ratings':
ascending = False
df = df.sort_values(by_category, ascending = ascending)
df = df.head(n_games)
game_list = []
image_list = []
for row in df.iterrows():
#game_name = row[1]['name'] + ' (' + str(row[1]['year_published']) + ')'
game_name = row[1]['name']
game_list.append(game_name)
image_url = row[1]['image_url']
image_list.append(image_url)
return game_list, image_list
|
2702d6b072ba9ac49565c9ee768d65c431441724
| 3,648,550
|
def diurnalPDF( t, amplitude=0.5, phase=pi8 ):
"""
"t" must be specified in gps seconds
we convert the time in gps seconds into the number of seconds after the most recent 00:00:00 UTC
return (1 + amplitude*sin(2*pi*t/day - phase))/day
"""
if amplitude > 1:
raise ValueError("amplitude cannot be larger than 1")
t = gps2relativeUTC(t)
return (1 + amplitude*np.sin(twopi*t/day - phase))/day
|
6bf755851d2bf2582ca98c1bcbe67aa7dc4e0a2f
| 3,648,551
|
def imap_workers(workers, size=2, exception_handler=None):
"""Concurrently converts a generator object of Workers to
a generator of Responses.
:param workers: a generator of worker objects.
:param size: Specifies the number of workers to make at a time. default is 2
:param exception_handler: Callback function, called when an exception occurs. Params: Worker, Exception
"""
pool = Pool(size)
def start(r):
return r.start()
for worker in pool.imap_unordered(start, workers):
if worker.response is not None:
yield worker.response
elif exception_handler:
exception_handler(worker, worker.exception)
pool.join()
|
c4ab81770b40238025055394bf43ca0dc99dd506
| 3,648,552
|
import time
def output_time(time_this:float=None,end:str=" | ")->float:
"""Take a Unix timestamp and print it as a formatted time string; defaults to the current time."""
if not time_this:
time_this=time.time()-TIMEZONE
print(time.strftime('%Y-%m-%d %H:%M:%S',time.gmtime(time_this)),end=end)
#
return time_this
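For example, passing a nonzero Unix timestamp prints the corresponding UTC time and returns it unchanged (a timestamp of 0 is treated as "not given" because of the `if not time_this` check):
ts = output_time(86400)  # prints '1970-01-02 00:00:00 | '
print(ts)                # -> 86400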
|
ba17400306af7142a91bd5b62941c52fc59dbf1a
| 3,648,553
|
import pygame
def blend_color(color1, color2, blend_ratio):
"""
Blend two colors together given the blend_ration
:param color1: pygame.Color
:param color2: pygame.Color
:param blend_ratio: float between 0.0 and 1.0
:return: pygame.Color
"""
r = color1.r + (color2.r - color1.r) * blend_ratio
g = color1.g + (color2.g - color1.g) * blend_ratio
b = color1.b + (color2.b - color1.b) * blend_ratio
a = color1.a + (color2.a - color1.a) * blend_ratio
return pygame.Color(int(r), int(g), int(b), int(a))
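A short usage sketch (pygame imported above; pygame.Color does not require pygame.init()): blending black and red halfway gives a dark red, with fractional components truncated by int().
half_red = blend_color(pygame.Color(0, 0, 0, 255), pygame.Color(255, 0, 0, 255), 0.5)
print(tuple(half_red))  # -> (127, 0, 0, 255)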
|
0bb7fa1570472e60bd93a98f6da3a515ca9dd500
| 3,648,554
|
import os
def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False):
"""Returns a list of images by a given tag from Flick25k dataset,
it will download Flickr25k from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`_
at the first time you use it.
Parameters
------------
tag : string or None
If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`_.
If you want to get all images, set to ``None``.
path : string
The path that the data is downloaded to, defaults is ``data/flickr25k/``.
n_threads : int, number of thread to read image.
printable : bool, print information when reading images, default is ``False``.
Examples
-----------
- Get images with tag of sky
>>> images = tl.files.load_flickr25k_dataset(tag='sky')
- Get all images
>>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)
"""
path = os.path.join(path, 'flickr25k')
filename = 'mirflickr25k.zip'
url = 'http://press.liacs.nl/mirflickr/mirflickr25k/'
## download dataset
if folder_exists(path+"/mirflickr") is False:
print("[*] Flickr25k is nonexistent in {}".format(path))
maybe_download_and_extract(filename, path, url, extract=True)
del_file(path+'/'+filename)
## return images by the given tag.
# 1. image path list
folder_imgs = path+"/mirflickr"
path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
path_imgs.sort(key=natural_keys)
# print(path_imgs[0:10])
# 2. tag path list
folder_tags = path+"/mirflickr/meta/tags"
path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False)
path_tags.sort(key=natural_keys)
# print(path_tags[0:10])
# 3. select images
if tag is None:
print("[Flickr25k] reading all images")
else:
print("[Flickr25k] reading images with tag: {}".format(tag))
images_list = []
for idx in range(0, len(path_tags)):
tags = read_file(folder_tags+'/'+path_tags[idx]).split('\n')
# print(idx+1, tags)
if tag is None or tag in tags:
images_list.append(path_imgs[idx])
images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable)
return images
|
b2d449a9d234aeb3d0d80bd1508ebcb55d57e6cd
| 3,648,555
|
def solve_a_star(start_id: str, end_id: str, nodes, edges):
"""
Get the shortest distance between two nodes using Dijkstra's algorithm.
:param start_id: ID of the start node
:param end_id: ID of the end node
:return: Shortest distance between start and end node
"""
solution_t_start = perf_counter()
solution = []
associations = {start_id: None}
closed = set() # Nodes that have been resolved
fringe = [] # Min-heap that holds nodes to check (aka. fringe)
start_y, start_x = nodes[start_id]
end_y, end_x = nodes[end_id]
start_node = (0 + calc_distance(start_y, start_x, end_y, end_x), 0, start_id)
heappush(fringe, start_node)
while len(fringe) > 0:
c_node = heappop(fringe)
c_f, c_distance, c_id = c_node
c_y, c_x = nodes[c_id]
if c_id == end_id:
return c_distance, solution, perf_counter() - solution_t_start, associations_to_path(associations, c_id,
nodes)
if c_id not in closed:
closed.add(c_id)
for child_id, c_to_child_distance in edges[c_id]:
if child_id not in closed:
# Add to solution path
if child_id not in associations:
associations[child_id] = c_id
child_distance = c_distance + c_to_child_distance # Cost function
child_y, child_x = nodes[child_id]
child_node = (
child_distance + calc_distance(child_y, child_x, end_y, end_x), child_distance, child_id)
heappush(fringe, child_node)
solution.append(((c_y, c_x), (child_y, child_x)))
return None
|
467257c15c7a99d217d75b69876b2f64ecd0b58e
| 3,648,556
|
def get_dhcp_relay_statistics(dut, interface="", family="ipv4", cli_type="", skip_error_check=True):
"""
API to get DHCP relay statistics
Author Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
:param dut:
:type dut:
:param interface:
:type interface:
"""
cli_type = st.get_ui_type(dut, cli_type=cli_type)
if cli_type in ['click', 'klish']:
ip_val = "ip" if family == "ipv4" else "ipv6"
if interface:
command = "show {} dhcp-relay statistics {}".format(ip_val, interface)
else:
command = "show {} dhcp-relay statistics".format(ip_val)
return st.show(dut, command, type=cli_type, skip_error_check=skip_error_check)
elif cli_type in ['rest-patch', 'rest-put']:
return _get_rest_dhcp_relay_statistics(dut, interface=interface, family=family)
else:
st.error("Unsupported CLI_TYPE: {}".format(cli_type))
return False
|
0784cd367d638124458d9e0fb808b45bcc239a84
| 3,648,557
|
def _runcmd(cmd, proc):
"""Run a command"""
cmdstr = proc.template(cmd,
**proc.envs).render(dict(proc=proc, args=proc.args))
logger.info('Running command from pyppl_runcmd ...', proc=proc.id)
logger.debug(' ' + cmdstr, proc=proc.id)
cmd = cmdy.bash(c=cmdstr, _raise=False).iter
for line in cmd:
logger.cmdout(line, proc=proc.id)
cmd.wait()
if cmd.rc == 0:
return True
for line in cmd.stderr.splitlines():
logger.cmderr(line, proc=proc.id)
return False
|
f7271954e94b45ba46cb7d32535b29f7946cbde0
| 3,648,558
|
def check_rule_for_Azure_ML(rule):
"""Check if the ports required for Azure Machine Learning are open"""
required_ports = ['29876', '29877']
if check_source_address_prefix(rule.source_address_prefix) is False:
return False
if check_protocol(rule.protocol) is False:
return False
if check_direction(rule.direction) is False:
return False
if check_provisioning_state(rule.provisioning_state) is False:
return False
if rule.destination_port_range is not None:
if check_ports_in_destination_port_ranges(
required_ports,
[rule.destination_port_range]) is False:
return False
else:
if check_ports_in_destination_port_ranges(
required_ports,
rule.destination_port_ranges) is False:
return False
return True
|
fb6067d484a3698b2d10d297e3419510d1d8c4e9
| 3,648,559
|
import re
def text_cleanup(text: str) -> str:
"""
A simple text cleanup function that strips all new line characters and
substitutes consecutive white space characters by a single one.
:param text: Input text to be cleaned.
:return: The cleaned version of the text
"""
    text = text.replace('\n', '')
    return re.sub(r'\s{2,}', ' ', text)
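# Illustrative usage sketch (not from the original source).
print(text_cleanup("spread  over \n two lines"))   # -> "spread over two lines"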
|
84b9752f261f94164e2e83b944a2c12cee2ae5d8
| 3,648,560
|
# NOTE: this is a web2py controller function; T, request, session, redirect,
# URL, db and s3base are provided by the framework's execution environment.
def geocode():
"""
Call a Geocoder service
"""
if "location" in request.vars:
location = request.vars.location
else:
session.error = T("Need to specify a location to search for.")
redirect(URL(r=request, f="index"))
if "service" in request.vars:
service = request.vars.service
else:
# @ToDo: service=all should be default
service = "google"
if service == "google":
return s3base.GoogleGeocoder(location, db).get_kml()
if service == "yahoo":
return s3base.YahooGeocoder(location, db).get_xml()
|
0ddc7380b3ff3ccfba5f9cf3fbcc489a92dd1ba0
| 3,648,561
|
def create_overide_pandas_func(
cls, func, verbose, silent, full_signature, copy_ok, calculate_memory
):
""" Create overridden pandas method dynamically with
additional logging using DataFrameLogger
Note: if we extracting _overide_pandas_method outside we need to implement decorator like here
https://stackoverflow.com/questions/10176226/how-do-i-pass-extra-arguments-to-a-python-decorator
:param cls: pandas class for which the method should be overriden
:param func: pandas method name to be overridden
:param silent: Whether additional the statistics get printed
:param full_signature: adding additional information to function signature
:param copy_ok: whether the dataframe is allowed to be copied to calculate more informative metadata logs
:return: the same function with additional logging capabilities
"""
def _run_method_and_calc_stats(
fn,
fn_args,
fn_kwargs,
input_df,
full_signature,
silent,
verbose,
copy_ok,
calculate_memory,
):
if copy_ok:
# If we're ok to make copies, copy the input_df so that we can compare against the output of inplace methods
try:
# Will hit infinite recursion if we use the patched copy method so use the original
original_input_df = getattr(
input_df, settings.ORIGINAL_METHOD_PREFIX + "copy"
)(deep=True)
except AttributeError:
original_input_df = input_df.copy(deep=True)
output_df, execution_stats = get_execution_stats(
cls, fn, input_df, fn_args, fn_kwargs, calculate_memory
)
if output_df is None:
# The operation was strictly in place so we just call the dataframe the output_df as well
output_df = input_df
if copy_ok:
# If this isn't true and the method was strictly inplace, input_df and output_df will just
# point to the same object
input_df = original_input_df
step_stats = StepStats(
execution_stats,
cls,
fn,
fn_args,
fn_kwargs,
full_signature,
input_df,
output_df,
)
step_stats.log_stats_if_needed(silent, verbose, copy_ok)
if isinstance(output_df, pd.DataFrame) or isinstance(output_df, pd.Series):
step_stats.persist_execution_stats()
return output_df
def _overide_pandas_method(fn):
if cls == pd.DataFrame:
register_method_wrapper = pf.register_dataframe_method
elif cls == pd.Series:
register_method_wrapper = pf.register_series_method
@register_method_wrapper
@wraps(fn)
def wrapped(*args, **fn_kwargs):
input_df, fn_args = args[0], args[1:]
output_df = _run_method_and_calc_stats(
fn,
fn_args,
fn_kwargs,
input_df,
full_signature,
silent,
verbose,
copy_ok,
calculate_memory,
)
return output_df
return wrapped
return exec(f"@_overide_pandas_method\ndef {func}(df, *args, **kwargs): pass")
|
db2bf7cb5d5395aeb700ca14211690750f056a91
| 3,648,562
|
import numpy as np
from numpy import linalg as la
def orthogonalize(U, eps=1e-15):
"""
Orthogonalizes the matrix U (d x n) using Gram-Schmidt Orthogonalization.
If the columns of U are linearly dependent with rank(U) = r, the last n-r columns
will be 0.
Args:
U (numpy.array): A d x n matrix with columns that need to be orthogonalized.
eps (float): Threshold value below which numbers are regarded as 0 (default=1e-15).
Returns:
(numpy.array): A d x n orthogonal matrix. If the input matrix U's cols were
not linearly independent, then the last n-r cols are zeros.
Examples:
```python
>>> import numpy as np
>>> import gram_schmidt as gs
>>> gs.orthogonalize(np.array([[10., 3.], [7., 8.]]))
array([[ 0.81923192, -0.57346234],
[ 0.57346234, 0.81923192]])
>>> gs.orthogonalize(np.array([[10., 3., 4., 8.], [7., 8., 6., 1.]]))
array([[ 0.81923192 -0.57346234 0. 0. ]
[ 0.57346234 0.81923192 0. 0. ]])
```
"""
n = len(U[0])
# numpy can readily reference rows using indices, but referencing full rows is a little
# dirty. So, work with transpose(U)
V = U.T
for i in range(n):
prev_basis = V[0:i] # orthonormal basis before V[i]
coeff_vec = np.dot(prev_basis, V[i].T) # each entry is np.dot(V[j], V[i]) for all j < i
# subtract projections of V[i] onto already determined basis V[0:i]
V[i] -= np.dot(coeff_vec, prev_basis).T
if la.norm(V[i]) < eps:
V[i][V[i] < eps] = 0. # set the small entries to 0
else:
V[i] /= la.norm(V[i])
return V.T
|
5807c0e5c7ee663391123076c8784cfb7e445760
| 3,648,563
|
import os
def get_files_under_dir(directory, ext='', case_sensitive=False):
"""
Perform recursive search in directory to match files with one of the
extensions provided
:param directory: path to directory you want to perform search in.
:param ext: list of extensions of simple extension for files to match
:param case_sensitive: is case of filename takes into consideration
:return: list of files that matched query
"""
    if isinstance(ext, (list, tuple)):
        allowed_extensions = list(ext)
    else:
        allowed_extensions = [ext]
    if not case_sensitive:
        # Materialise as a list: a map() iterator would be exhausted after the
        # first file checked inside the loop below.
        allowed_extensions = [e.lower() for e in allowed_extensions]
    result = []
    for root, dirs, files in os.walk(directory):
        for filename in files:
            check_filename = filename if case_sensitive else filename.lower()
            if any(check_filename.endswith(e) for e in allowed_extensions):
                result.append(filename)
    return result
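# Illustrative usage sketch (not from the original source): collect image file
# names under a hypothetical 'assets' directory, ignoring case.
images = get_files_under_dir('assets', ext=('.png', '.jpg'))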
|
3ba3428d88c164fce850a29419b6eab46aa8b646
| 3,648,564
|
def boolean(func):
"""
Sets 'boolean' attribute (this attribute is used by list_display).
"""
    func.boolean = True
return func
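# Illustrative usage sketch (not from the original source): mark a Django admin
# display callable so list_display renders it as a boolean icon; 'obj.published'
# is a hypothetical model field.
@boolean
def is_published(obj):
    return obj.published is not None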
|
9bbf731d72e53aa9814caacaa30446207af036bd
| 3,648,565
|
def load_output_template_configs(items):
"""Return list of output template configs from *items*."""
templates = []
for item in items:
template = OutputTemplateConfig(
id=item["id"],
pattern_path=item.get("pattern-path", ""),
pattern_base=item.get("pattern-base", ""),
append_username_to_name=item.get("append-username-to-name", False),
append_colorspace_to_name=item.get("append-colorspace-to-name", False),
append_passname_to_name=item.get("append-passname-to-name", False),
append_passname_to_subfolder=item.get("append-passname-to-subfolder", False),
)
templates.append(template)
return tuple(templates)
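# Illustrative usage sketch (not from the original source): items are mappings
# with the keys read above; missing keys fall back to the defaults.
templates = load_output_template_configs([
    {"id": "beauty", "pattern-path": "{shot}/{name}", "append-passname-to-name": True},
])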
|
028502662906230bf2619fa105caa1d525ff8e75
| 3,648,566
|
def read_keyword_arguments_section(docstring: Docstring, start_index: int) -> tuple[DocstringSection | None, int]:
"""
Parse a "Keyword Arguments" section.
Arguments:
docstring: The docstring to parse
start_index: The line number to start at.
Returns:
A tuple containing a `Section` (or `None`) and the index at which to continue parsing.
"""
arguments, index = read_arguments(docstring, start_index)
if arguments:
return DocstringSection(DocstringSectionKind.keyword_arguments, arguments), index
warn(docstring, index, f"Empty keyword arguments section at line {start_index}")
return None, index
|
9c789fd4b08d2f3d9e99d4db568ab710e4765c91
| 3,648,567
|
from functools import partial
from typing import Mapping
from typing import Iterable
def is_builtin(x, drop_callables=True):
"""Check if an object belongs to the Python standard library.
Parameters
----------
    x: any
        The object to check; containers are inspected recursively (see Returns).
    drop_callables: bool
If True, we won't consider callables (classes/functions) to be builtin.
Classes have class `type` and functions have class
`builtin_function_or_method`, both of which are builtins - however,
this is often not what we mean when we want to know if something is
built in. Note: knowing the class alone is not enough to determine if
the objects it creates are built-in; this may depend on the kwargs
passed to its constructor. This will NOT check if a class was defined
in the standard library.
Returns
-------
bool: True if the object is built-in. If the object is list-like, each
item will be checked as well the container. If the object is dict-like,
each key AND value will be checked (you can always pass in d.keys() or
d.values() for more limited checking). Again, the container itself will
be checked as well.
"""
def _builtin(x, drop_callables):
if callable(x) and drop_callables:
return False
return x.__class__.__module__ == 'builtins'
builtin = partial(_builtin, drop_callables=drop_callables)
# Check mapping before iterable because mappings are iterable.
if isinstance(x, Mapping):
return builtin(x) and all(builtin(o) for o in flatten(x.items()))
elif isinstance(x, Iterable):
return builtin(x) and all(builtin(o) for o in flatten(x))
return builtin(x)
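# Illustrative usage sketch (not from the original source); 'flatten' is an
# external helper this module is assumed to provide for nested containers, so
# the examples below stick to cases that do not touch it.
print(is_builtin(3))                           # True  - int is a builtin
print(is_builtin(len))                         # False - callables are dropped by default
print(is_builtin(len, drop_callables=False))   # True  - builtin_function_or_method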
|
d84fbd770e048172d8c59315fbe24d58046f77b8
| 3,648,568
|
import yaml
import numpy as np
def random_pair_selection(config_path,
data_size=100,
save_log="random_sents"):
"""
randomly choose from parallel data, and save to the save_logs
:param config_path:
:param data_size:
:param save_log:
:return: random selected pairs
"""
np.random.seed(32767)
with open(config_path.strip()) as f:
        configs = yaml.safe_load(f)  # safe_load avoids arbitrary object construction; plain config mappings load the same
data_configs = configs["data_configs"]
with open(data_configs["train_data"][0], "r") as src, \
open(data_configs["train_data"][1], "r") as trg, \
open(save_log+".src", "w") as out_src, open(save_log+".trg", "w") as out_trg:
        counter = 0
        return_src = []
        return_trg = []
        for sent_s, sent_t in zip(src, trg):
            if np.random.uniform() < 0.2 and counter < data_size:
                counter += 1
                out_src.write(sent_s)
                out_trg.write(sent_t)
                return_src += [sent_s.strip()]
                return_trg += [sent_t.strip()]
return return_src, return_trg
|
417b59bae49fe8aa0566f20f8ff371c7760e1a8a
| 3,648,569
|
import pytest
import tbx
import version  # assumed: the project's local version module exposing version._v (not sys.version)
def test_deployable():
"""
Check that 1) no untracked files are hanging out, 2) no staged but
uncommitted updates are outstanding, 3) no unstaged, uncommitted changes
are outstanding, 4) the most recent git tag matches HEAD, and 5) the most
recent git tag matches the current version.
"""
pytest.dbgfunc()
staged, changed, untracked = tbx.git_status()
assert untracked == [], "You have untracked files"
assert changed == [], "You have unstaged updates"
assert staged == [], "You have updates staged but not committed"
if tbx.git_current_branch() != 'master':
return True
last_tag = tbx.git_last_tag()
msg = "Version ({}) does not match tag ({})".format(version._v,
last_tag)
assert version._v == last_tag, msg
assert tbx.git_hash() == tbx.git_hash(last_tag), "Tag != HEAD"
|
32d8f0a49bbd212b6e0123a34d8844dd622b1ba6
| 3,648,570
|
from collections import OrderedDict
from collections import Counter
def profile_nominal(pairs, **options):
"""Return stats for the nominal field
Arguments:
:param pairs: list with pairs (row, value)
:return: dictionary with stats
"""
result = OrderedDict()
values = [r[1] for r in pairs]
c = Counter(values)
result['top'], result['freq'] = c.most_common(1)[0]
categories = list(c)
categories.sort()
result['categories'] = categories
result['categories_num'] = len(categories)
return result
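# Illustrative usage sketch (not from the original source): pairs are
# (row, value) tuples as described in the docstring.
stats = profile_nominal([(1, 'red'), (2, 'blue'), (3, 'red')])
# -> top='red', freq=2, categories=['blue', 'red'], categories_num=2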
|
00ef211e8f665a02f152e764c409668481c748cc
| 3,648,571
|
from typing import List
from clang.cindex import Cursor, CursorKind
def class_definitions(cursor: Cursor) -> List[Cursor]:
"""
extracts all class definitions in the file pointed by cursor. (typical mocks.h)
Args:
cursor: cursor of parsing result of target source code by libclang
Returns:
a list of cursor, each pointing to a class definition.
"""
cursors = cursors_in_same_file(cursor)
class_cursors = []
for descendant in cursors:
# check if descendant is pointing to a class declaration block.
if descendant.kind != CursorKind.CLASS_DECL:
continue
if not descendant.is_definition():
continue
# check if this class is directly enclosed by a namespace.
if descendant.semantic_parent.kind != CursorKind.NAMESPACE:
continue
class_cursors.append(descendant)
return class_cursors
|
c2831b787905b02865890aa2680c37b97ec2e0a8
| 3,648,572
|
import os
import trimesh
def visualize_dataset(dataset_directory=None, mesh_filename_path=None):
"""
This method loads the mesh file from dataset directory and helps us to visualize it
Parameters:
dataset_directory (str): root dataset directory
mesh_filename_path (str): mesh file name to process
Returns:
mesh (trimesh object)
"""
try:
if dataset_directory is not None and mesh_filename_path is not None:
mesh = trimesh.load(os.path.join(dataset_directory, mesh_filename_path))
return mesh
    except Exception as exc:
        # print() does not accept logging's exc_info kwarg; report the error itself instead.
        print('Caught Exception while reading meshfile:', exc)
|
53a91f923fd231fb467dfc970c91d7400c085035
| 3,648,573
|
def service_list_by_category_view(request, category):
"""Shows services for a chosen category.
If url doesn't link to existing category, return user to categories list"""
template_name = 'services/service-list-by-category.html'
if request.method == "POST":
contact_form = ContactForm(request.POST)
if contact_form.is_valid():
contact_form.save()
return redirect(reverse('accounts:profile'))
else:
if request.user.is_authenticated:
initial_data = {
"user": request.user,
"name": request.user.first_name + " " + request.user.last_name,
"email": request.user.email
}
form = ContactForm(
request.POST or None, initial=initial_data)
else:
form = ContactForm()
try:
obj = ServiceCategory.objects.get(name=category)
queryset = Service.objects.filter(category=obj.pk)
context = {
"obj": obj,
"queryset": queryset,
"form": form,
}
except ServiceCategory.DoesNotExist:
messages.error(request, 'No category named <em>' + category + '</em>.')
return redirect("services:services_list")
return render(request, template_name=template_name, context=context)
|
dcbed59c8b6876b7072eb82b27f6b10e829c2daa
| 3,648,574
|
def check_columns(board: list):
"""
Check column-wise compliance of the board for uniqueness (buildings of unique height) and visibility (top-bottom and vice versa).
Same as for horizontal cases, but aggregated in one function for vertical case, i.e. columns.
>>> check_columns(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_columns(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41232*', '*2*1***'])
False
>>> check_columns(['***21**', '412553*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
    size = len(board)
    transformed_board = []
    for col in range(size):
        transformed_board.append(''.join(board[row][col] for row in range(size)))
    if not check_horizontal_visibility(transformed_board):
        return False
    return True
|
26ea379a165b90eadcf89640f00857e9e95146c7
| 3,648,575
|
def get_git_tree(pkg, g, top_prd):
"""
:return:
"""
global pkg_tree
global pkg_id
global pkg_list
global pkg_matrix
pkg_tree = Tree()
pkg_id = 0
pkg_list = dict()
# pkg_list['root'] = []
if pkg == '':
return None
if pkg in Config.CACHED_GIT_REPOS:
pkg_content = Config.CACHED_GIT_REPOS[pkg]
print("^", end="", flush=True)
else:
pkg_content = get_gitpkg_content(pkg, g)
if pkg_content:
# first node in the tree
if pkg in pkg_matrix.keys():
if top_prd not in pkg_matrix[pkg]:
pkg_matrix[pkg].append(top_prd)
else:
pkg_matrix[pkg] = [top_prd]
pkg_content.key = str(pkg_id) + "." +pkg_content.name
pkg_content.component_id = top_prd.id
pkg_content.component_name = top_prd.name
# print(pkg_content.key, pkg_content.pkey, pkg_content.name, pkg_content.ups_table, ">>>>>>", end="", flush=True)
pkg_tree.create_node(pkg_content.key, pkg_content.key, data=pkg_content)
if pkg not in Config.CACHED_GIT_REPOS.keys():
Config.CACHED_GIT_REPOS[pkg] = pkg_content
print("+", end="", flush=True)
for child in pkg_content.ups_table:
walk_git_tree(child, g, pkg_content.key, top_prd)
else:
return {'tree': None, 'deps': None}
# print(pkg_tree)
return {'tree': pkg_tree, 'deps': pkg_list}
|
05434882a476c8506804918cb44624c7734bf405
| 3,648,576
|
import requests
def get_requests_session():
"""Return an empty requests session, use the function to reuse HTTP connections"""
session = requests.session()
session.mount("http://", request_adapter)
session.mount("https://", request_adapter)
session.verify = bkauth_settings.REQUESTS_VERIFY
session.cert = bkauth_settings.REQUESTS_CERT
return session
|
e5921b12d29718e9ef1f503f902fed02a7c7e82f
| 3,648,577
|
def edit_role_description(rid, description, analyst):
"""
Edit the description of a role.
:param rid: The ObjectId of the role to alter.
:type rid: str
:param description: The new description for the Role.
:type description: str
:param analyst: The user making the change.
:type analyst: str
"""
description = description.strip()
Role.objects(id=rid,
name__ne=settings.ADMIN_ROLE).update_one(set__description=description)
return {'success': True}
|
f857755766da1f8f5be0e3dc255ed34aa7ed3ed3
| 3,648,578
|
def have_questions(pair, config, info=None):
"""
Return True iff both images are annotated with questions.
"""
qas = info["qas"]
c1id = pair[0]
if qas[c1id]['qas'] == []:
return False
c2id = pair[1]
if qas[c2id]['qas'] == []:
return False
return True
|
45a5f4babcc17ad5573008ca31773d51334144cd
| 3,648,579
|
import collections as collec
import numpy as np
def get_dist_genomic(genomic_data, var_or_gene):
"""Get the distribution associated to genomic data for its characteristics
Parameters: genomic_data (dict): with UDN ID as key and list with dictionaries as value,
dict contaning characteristics of the considered genomic data
var_or_gene (str): "Var" if variants, "Gen" otherwise
Returns: gene_effects (collec.Counter): distribution of characteristics for selected genomic data
"""
gene_list=[]
for patient in genomic_data:
for i in range(len(genomic_data[patient])):
if var_or_gene=="Var":
if "effect" in list(genomic_data[patient][i].keys()) and "gene" in list(genomic_data[patient][i].keys()):
gene_list.append([genomic_data[patient][i]["gene"],genomic_data[patient][i]["effect"]])
else:
gene_list.append([genomic_data[patient][i]["gene"],"NA"])
elif var_or_gene=="Gen":
if "status" in list(genomic_data[patient][i].keys()) and "gene" in list(genomic_data[patient][i].keys()):
gene_list.append([genomic_data[patient][i]["gene"],genomic_data[patient][i]["status"]])
else:
gene_list.append([genomic_data[patient][i]["gene"],"NA"])
else:
print("var_or_gene must be Var or Gen")
gene_effects=collec.Counter(np.array(gene_list)[:,1])
return gene_effects
|
ada0b7ecd57ace9799102e97bc9173d888c23565
| 3,648,580
|
def get_gmb_dataset_train(max_sentence_len):
"""
Returns the train portion of the gmb data-set. See TRAIN_TEST_SPLIT param for split ratio.
    :param max_sentence_len: maximum sentence length used when tokenizing and padding
    :return: tuple of (tokenized_padded_tag2idx, tokenized_padded_sentences, sentences),
        each truncated to the first TRAIN_TEST_SPLIT fraction of the data
"""
tokenized_padded_tag2idx, tokenized_padded_sentences, sentences = get_gmb_dataset(max_sentence_len)
return tokenized_padded_tag2idx[:int(len(tokenized_padded_tag2idx)*TRAIN_TEST_SPLIT)], \
tokenized_padded_sentences[:int(len(tokenized_padded_sentences)*TRAIN_TEST_SPLIT)], \
sentences[:int(len(sentences)*TRAIN_TEST_SPLIT)]
|
762800a0b986c74037e79dc1db92d5b2f6cd2e50
| 3,648,581
|
def is_answer_reliable(location_id, land_usage, expansion):
"""
Before submitting to DB, we judge if an answer reliable and set the location done if:
1. The user passes the gold standard test
2. Another user passes the gold standard test, and submitted the same answer as it.
Parameters
----------
location_id : int
ID of the location.
land_usage : int
User's answer of judging if the land is a farm or has buildings.
(check the answer table in model.py for the meaning of the values)
expansion : int
User's answer of judging the construction is expanded.
(check the answer table in model.py for the meaning of the values)
Return
------
bool
Result of the checking.
        True : Matches another good answer candidate.
        False : No other good answer candidates exist or match.
    """
    # Good candidates: answers from users who passed the gold standard quality
    # test and submitted the same answer for the same location.
    good_answer_candidates = Answer.query.filter_by(gold_standard_status=1, location_id=location_id, land_usage=land_usage, expansion=expansion).all()
    return len(good_answer_candidates) > 0
|
3aa510d68115ef519ec2a1318102d302aae81382
| 3,648,582
|
import numpy
from scipy import linalg  # linalg.inv is used below; numpy.linalg would work as well
def _polyfit_coeffs(spec,specerr,scatter,labelA,return_cov=False):
"""For a given scatter, return the best-fit coefficients"""
Y= spec/(specerr**2.+scatter**2.)
ATY= numpy.dot(labelA.T,Y)
CiA= labelA*numpy.tile(1./(specerr**2.+scatter**2.),(labelA.shape[1],1)).T
ATCiA= numpy.dot(labelA.T,CiA)
ATCiAinv= linalg.inv(ATCiA)
if return_cov:
return (numpy.dot(ATCiAinv,ATY),ATCiAinv)
else:
return numpy.dot(ATCiAinv,ATY)
|
9398fa2072625eb66ea8df2f79008577fe6aaabe
| 3,648,583
|
import os
from pathlib import Path
def get_repo_dir():
"""Get repository root directory"""
root_dir = './'
if os.path.isdir(Path(__file__).parent.parent / 'src'):
root_dir = f"{str((Path(__file__).parent.parent).resolve())}/"
elif os.path.isdir('../src'):
root_dir = '../'
elif os.path.isdir('./src'):
root_dir = './'
else:
log.warning('ROOT FOLDER NOT FOUND.')
return root_dir
|
efa688b3458ab282a427506578cbabfc2d329c1f
| 3,648,584
|
import numpy as np
from matplotlib.colors import to_rgba
def colorize(data, colors, display_ranges):
"""Example:
colors = 'white', (0, 1, 0), 'red', 'magenta', 'cyan'
display_ranges = np.array([
[100, 3000],
[700, 5000],
[600, 3000],
[600, 4000],
[600, 3000],
])
rgb = fig4.colorize(data, colors, display_ranges)
plt.imshow(rgb)
"""
color_map = np.array([to_rgba(c)[:3] for c in colors])
dr = display_ranges[..., None, None]
normed = (data - dr[:, 0]) / (dr[:, 1] - dr[:, 0] )
# there's probably a nicer way to do this
rgb = (color_map.T[..., None, None] * normed[None, ...]).sum(axis=1)
return rgb.clip(min=0, max=1).transpose([1, 2, 0])
|
efb6ff9c0573da4a11cbfdbf55acaccbb69de216
| 3,648,585
|
import itertools
import numpy as np
import pandas as pd
def multi_mdf(S, all_drGs, constraints, ratio_constraints=None, net_rxns=[],
all_directions=False, x_max=0.01, x_min=0.000001,
T=298.15, R=8.31e-3):
"""Run MDF optimization for all condition combinations
ARGUMENTS
S : pandas.DataFrame
Pandas DataFrame that corresponds to the stoichiometric matrix. Column
names are reaction IDs and row indices are compound names.
all_drGs : pandas.DataFrame
Pandas DataFrame with reaction IDs in the first column, condition
identifier strings in the intermediate columns, and reaction standard
Gibbs energies in float format in the last column.
constraints : pandas.DataFrame
Pandas DataFrame with a compound ID column (string), a lower
concentration bound column (float) and an upper concentration bound
colunn (float).
ratio_constraints : pandas.DataFrame, optional
Pandas DataFrame with two compound ID columns (string), a lower limit
concentration ratio column (float), an upper limit concentration ratio
column (float) and the concentration ratio range step number (int). The
third column is interpreted as the fixed ratio when the fourth column
contains a None value. The last column indicates the type of spacing to
use for ratio ranges (linear or logarithmic).
net_rxns : list of strings
List with strings referring to the background network reactions for
network-embedded MDF analysis (NEM). The reactions should be in S.
all_directions : bool, optional
Set to True to calculate MDF for all possible reaction direction
combinations. Not recommended for sets of reactions >20.
x_max : float
Maximum default metabolite concentration (M).
x_min : float
Minimum default metabolite concentration (M).
T : float
Temperature (K).
R : float
Universal gas constant (kJ/(mol*K)).
RETURNS
mdf_table : pandas.DataFrame
A Pandas DataFrame containing all MDF results for a single pathway. Each
row corresponds to one individual MDF optimization, with the parameters
described in the columns:
v0 ... : string
Condition identifiers as supplied in all_drGs.
drG_std(rxn_id) : float
The standard reaction Gibbs energy for the reaction 'rxn_id'.
[cpd_id_num]/[cpd_id_den] ... : float
Ratio of concentration between compounds 'cpd_id_num' and
'cpd_id_den'.
dir(rxn_id) ... : int
The direction used for the reaction 'rxn_id'. The order is the same
as the columns in S.
[cpd_id] ... : float
Optimized concentration for compound 'cpd_id' (M).
drG_opt(rxn_id) : float
The optimized reaction Gibbs energy for reaction 'rxn_id' (kJ/mol).
success : int
Indicates optimization success (1) or failure (0).
MDF : float
The Max-min Driving Force determined through linear optimization
(kJ/mol).
"""
# All drGs
# -> All ratio combinations
# -> All directions
# Number of reactions
n_rxn = S.shape[1]
# List the condition identifiers
conditions = list(all_drGs.columns[1:-1])
# Create column labels for output DataFrame
if ratio_constraints is not None:
ratio_labels = [
'ratio_' + ratio_constraints.iloc[row,:]['cpd_id_num'] + \
'_' + ratio_constraints.iloc[row,:]['cpd_id_den'] \
for row in range(ratio_constraints.shape[0])
]
else:
ratio_labels = []
column_labels = [
*conditions,
*['drGstd_' + rxn_id for rxn_id in list(S.columns)],
*ratio_labels,
*['dir_' + rxn_id for rxn_id in list(S.columns)],
*['c_' + cpd_id for cpd_id in list(S.index)],
*['drGopt_' + rxn_id for rxn_id in list(S.columns)],
'success',
'MDF'
]
# Also create labels for sorting (conditions, ratios and directions)
sort_labels = [
*conditions,
*ratio_labels,
*['dir_' + rxn_id for rxn_id in list(S.columns)]
]
# Iterator preparation
def prep_iter():
# Set up conditions iterator
if len(conditions):
cond_iter = all_drGs[conditions].drop_duplicates().iterrows()
else:
cond_iter = [None]
# Set up directions iterator
if not all_directions:
dir_iter = [[1.0]*n_rxn]
else:
dir_iter = itertools.product([1.0,-1.0], repeat=n_rxn)
# Set up ratios iterator
if ratio_constraints is not None:
rats_iter = ratio_iter(ratio_constraints)
else:
rats_iter = [None]
# Set up fixed concentration range constraints iterator
cons_iter = con_iter(constraints)
return itertools.product(cond_iter, dir_iter, rats_iter, cons_iter)
# Set up output DataFrame
mdf_table = pd.DataFrame(columns = column_labels)
# Determine number of rows that will be produced
M = 0
for i in prep_iter():
M += 1
# Iterate over all combinations of conditions, directions and ratios
n = 0
for params in prep_iter():
n += 1
progress = float(n / M * 100)
sWrite("\rPerforming MDF optimization... %0.1f%%" % progress)
# Extract specific condition, direction and ratio constraints
if params[0] is not None:
condition = pd.DataFrame(params[0][1]).T
else:
condition = None
direction = params[1]
rats = params[2]
constraints_mod = params[3]
# Obtain specific standard reaction Gibbs energies with correct sign
if condition is not None:
drGs = pd.merge(condition, all_drGs)
else:
drGs = all_drGs
drGs.is_copy = False
drGs.loc[:,['drG']] = drGs['drG'] * direction
# Modify direction (sign) of reactions in the stoichiometric matrix
S_mod = S * direction
# Set up MDF inputs
c = mdf_c(S_mod)
A = mdf_A(S_mod, net_rxns)
b = mdf_b(S_mod, drGs, constraints_mod, x_max, x_min, T, R)
# Use equality (ratio) constraints if they were specified
if rats is not None:
A_eq = mdf_A_eq(S_mod, rats)
b_eq = mdf_b_eq(rats)
# If the ratio constraints have been filtered out, set to None
if not A_eq.size or not b_eq.size:
A_eq = None
b_eq = None
else:
A_eq = None
b_eq = None
# Perform MDF
mdf_result = mdf(c, A, b, A_eq, b_eq)
# Prepare conditions list
if condition is not None:
conditions_list = list(condition.iloc[0,:])
else:
conditions_list = []
# Prepare ratios list
if rats is not None:
rats_list = list(rats.ratio)
else:
rats_list = []
# Format results row
mdf_row = [
*conditions_list,
*[float(drGs[drGs.rxn_id == rxn_id]['drG']) for rxn_id in S_mod.columns],
*rats_list,
*direction,
]
if mdf_result.success:
mdf_row.extend([
*np.exp(mdf_result.x[:-1]), # Concentrations
*calc_drGs(S_mod, drGs, mdf_result.x[:-1]), # Reaction Gibbs energies
1.0, # Success
mdf_result.x[-1]*R*T # MDF value
])
else:
mdf_row.extend([
*[np.nan]*S_mod.shape[0], # Concentrations
*[np.nan]*S_mod.shape[1], # Reaction Gibbs energies
0.0, # Failure
np.nan # No MDF value
])
# Append row to expected result
mdf_table = mdf_table.append(pd.DataFrame([mdf_row], columns = column_labels))
return mdf_table.sort_values(sort_labels)
|
3496105b764bc72b1c52ee28d419617798ad72cc
| 3,648,586
|
import numpy as np
def nufft_adjoint(input, coord, oshape=None, oversamp=1.25, width=4.0, n=128):
"""Adjoint non-uniform Fast Fourier Transform.
Args:
input (array): Input Fourier domain array.
coord (array): coordinate array of shape (..., ndim).
ndim determines the number of dimension to apply nufft adjoint.
oshape (tuple of ints): output shape.
oversamp (float): oversampling factor.
width (float): interpolation kernel full-width in terms of oversampled grid.
n (int): number of sampling points of interpolation kernel.
Returns:
array: Transformed array.
See Also:
:func:`sigpy.nufft.nufft`
"""
device = backend.get_device(input)
xp = device.xp
ndim = coord.shape[-1]
beta = np.pi * (((width / oversamp) * (oversamp - 0.5))**2 - 0.8)**0.5
if oshape is None:
oshape = list(input.shape[:-coord.ndim + 1]) + estimate_shape(coord)
else:
oshape = list(oshape)
with device:
coord = _scale_coord(backend.to_device(coord, device), oshape, oversamp)
kernel = backend.to_device(
_kb(np.arange(n, dtype=coord.dtype) / n, width, beta, coord.dtype), device)
os_shape = oshape[:-ndim] + [_get_ugly_number(oversamp * i) for i in oshape[-ndim:]]
output = interp.gridding(input, os_shape, width, kernel, coord)
for a in range(-ndim, 0):
i = oshape[a]
os_i = os_shape[a]
idx = xp.arange(i, dtype=input.dtype)
os_shape[a] = i
# Swap axes
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
# Oversampled IFFT
output = ifft(output, axes=[-1], norm=None)
output *= os_i / i**0.5
output = util.resize(output, os_shape)
# Calculate apodization
apod = (beta**2 - (np.pi * width * (idx - i // 2) / os_i)**2)**0.5
apod /= xp.sinh(apod)
# Apodize
output *= apod
# Swap back
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
return output
|
d3d03ebe3d905cb647fab7d801592e148023709e
| 3,648,587
|
def get_bustools_version():
"""Get the provided Bustools version.
This function parses the help text by executing the included Bustools binary.
:return: tuple of major, minor, patch versions
:rtype: tuple
"""
p = run_executable([get_bustools_binary_path()], quiet=True, returncode=1)
match = VERSION_PARSER.match(p.stdout.read())
return tuple(int(ver) for ver in match.groups()) if match else None
|
7de14349b9349352c3532fbcc0be58be0f9756c7
| 3,648,588
|
import nose
import sys
def main():
"""
Args: none
Returns: exit code
Usage: python -m rununittest
"""
# Can use unittest or nose; nose here, which allows --with-coverage.
return nose.run(argv=[sys.argv[0], "-s", "--with-coverage", "rununittest"])
|
6eaaf5ebfbe9536364669f186d1c84a5fc31be05
| 3,648,589
|
from urllib.parse import urlparse
def request_from_url(url):
"""Parses a gopher URL and returns the corresponding Request instance."""
pu = urlparse(url, scheme='gopher', allow_fragments=False)
t = '1'
s = ''
if len(pu.path) > 2:
t = pu.path[1]
s = pu.path[2:]
if len(pu.query) > 0:
s = s + '?' + pu.query
p = '70'
if pu.port:
p = str(pu.port)
return Request(t, pu.hostname, p, s)
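# Illustrative usage sketch (not from the original source); 'Request' is the
# module's own class built from (type, host, port, selector).
req = request_from_url("gopher://gopher.floodgap.com/1/world")
# -> item type '1', host 'gopher.floodgap.com', port '70', selector '/world'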
|
aff334f8358edcae028b65fa1b4cf5727638eaad
| 3,648,590
|
import os
def getJsonPath(name, moduleFile):
"""
获取JSON配置文件的路径:
1. 优先从当前工作目录查找JSON文件
2. 若无法找到则前往模块所在目录查找
"""
currentFolder = os.getcwd()
currentJsonPath = os.path.join(currentFolder, name)
if os.path.isfile(currentJsonPath):
return currentJsonPath
else:
moduleFolder = os.path.abspath(os.path.dirname(moduleFile))
moduleJsonPath = os.path.join(moduleFolder, '.', name)
return moduleJsonPath
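# Illustrative usage sketch (not from the original source): resolve a
# hypothetical settings file next to the calling module when it is not present
# in the current working directory.
settings_path = getJsonPath('settings.json', __file__)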
|
5f0dca485794dead91ecce70b4040809886186c3
| 3,648,591
|
def enable_pause_data_button(n, interval_disabled):
"""
Enable the play button when data has been loaded and data *is* currently streaming
"""
if n and n[0] < 1: return True
if interval_disabled:
return True
return False
|
4257a2deb9b8be87fe64a54129ae869623c323e8
| 3,648,592
|
from math import pi  # scipy.pi was removed in newer SciPy releases; math.pi is equivalent
def dProj(z, dist, input_unit='deg', unit='Mpc'):
"""
Projected distance, physical or angular, depending on the input units (if
input_unit is physical, returns angular, and vice-versa).
The units can be 'cm', 'ly' or 'Mpc' (default units='Mpc').
"""
if input_unit in ('deg', 'arcmin', 'arcsec'):
Da = dA(z, unit=unit)
else:
Da = dA(z, unit=input_unit)
# from angular to physical
    if input_unit == 'deg':
        dist = Da * pi * dist / 180
    elif input_unit == 'arcmin':
        dist = Da * pi * dist / (180 * 60)
    elif input_unit == 'arcsec':
        dist = Da * pi * dist / (180 * 3600)
    # from physical to angular
    if unit == 'deg':
        dist = dist * 180 / (pi * Da)
    elif unit == 'arcmin':
        dist = dist * 180 * 60 / (pi * Da)
    elif unit == 'arcsec':
        dist = dist * 180 * 3600 / (pi * Da)
return dist
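# Illustrative usage sketch (not from the original source); dA() is the
# module's angular-diameter-distance helper assumed to be defined elsewhere.
theta_arcmin = dProj(0.3, 1.0, input_unit='Mpc', unit='arcmin')   # 1 Mpc -> arcmin at z=0.3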
|
13610816dfb94a92d6890d351312661b04e8604f
| 3,648,593
|
def savgoldiff(x, dt, params=None, options={}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
optimization_method='Nelder-Mead', optimization_options={'maxiter': 10}, metric='rmse'):
"""
Optimize the parameters for pynumdiff.linear_model.savgoldiff
See pynumdiff.optimize.__optimize__ and pynumdiff.linear_model.savgoldiff for detailed documentation.
"""
# initial condition
if params is None:
orders = [2, 3, 5, 7, 9, 11, 13]
window_sizes = [3, 10, 30, 50, 90, 130, 200, 300]
smoothing_wins = [3, 10, 30, 50, 90, 130, 200, 300]
params = []
for order in orders:
for window_size in window_sizes:
for smoothing_win in smoothing_wins:
params.append([order, window_size, smoothing_win])
# param types and bounds
params_types = [int, int, int]
params_low = [1, 3, 3]
params_high = [12, 1e3, 1e3]
# optimize
func = pynumdiff.linear_model.savgoldiff
args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
optimization_options=optimization_options)
return opt_params, opt_val
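# Illustrative usage sketch (not from the original source): tune the
# Savitzky-Golay parameters against a noisy sine wave, then differentiate with
# them; assumes pynumdiff is importable and follows its usual
# (x, dt, params) -> (x_hat, dxdt_hat) call convention.
import numpy as np
t = np.linspace(0, 4 * np.pi, 400)
dt = t[1] - t[0]
x = np.sin(t) + 0.05 * np.random.randn(len(t))
params, value = savgoldiff(x, dt, tvgamma=1e-2)
x_hat, dxdt_hat = pynumdiff.linear_model.savgoldiff(x, dt, params)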
|
cb6da1a5fe3810ea1f481450667e548a9f64dae2
| 3,648,594
|
import base64
def credentials(scope="module"):
"""
Note that these credentials match those mentioned in test.htpasswd
"""
h = Headers()
h.add('Authorization',
'Basic ' + base64.b64encode("username:password"))
return h
|
4f0b1c17da546bfa655a96cfe5bcf74719dff55d
| 3,648,595
|
import sys
def _score_match(matchinfo: bytes, form, query) -> float:
""" Score how well the matches form matches the query
0.5: half of the terms match (using normalized forms)
1: all terms match (using normalized forms)
2: all terms are identical
3: all terms are identical, including case
"""
try:
if form == query:
return 3
if form.lower() == query.lower():
return 2
# Decode matchinfo blob according to https://www.sqlite.org/fts3.html#matchinfo
offset = 0
num_cols = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)
offset += 4
tokens = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)
offset += num_cols * 4
matched_tokens = int.from_bytes(matchinfo[offset : offset + 4], sys.byteorder)
# print(matchinfo, form, query, matched_tokens, tokens)
return matched_tokens / tokens
except Exception as e:
print(e)
raise
|
ef8c223b6c972f7b43fe46788ac894c5454e3f18
| 3,648,596
|
def _set_bias(clf, X, Y, recall, fpos, tneg):
"""Choose a bias for a classifier such that the classification
rule
clf.decision_function(X) - bias >= 0
has a recall of at least `recall`, and (if possible) a false positive rate
of at most `fpos`
    Parameters
    ----------
clf : Classifier
classifier to use
X : array-like [M-examples x N-dimension]
feature vectors
    Y : array [M-examples]
Binary classification
recall : float
Minimum fractional recall
fpos : float
Desired Maximum fractional false positive rate
tneg : int
Total number of negative examples (including previously-filtered
examples)
"""
df = clf.decision_function(X).ravel()
r = _recall_bias(df[Y == 1], recall)
f = _fpos_bias(df[Y == 1], fpos, tneg)
return min(r, f)
|
bbc903752d9abc93f723830e5c6c51459d18d0a5
| 3,648,597
|
from typing import Type
from typing import Dict
from typing import Any
def get_additional_params(model_klass: Type['Model']) -> Dict[str, Any]:
"""
By default, we dont need additional params to FB API requests. But in some instances (i.e. fetching Comments),
adding parameters makes fetching data simpler
"""
assert issubclass(model_klass, abstractcrudobject.AbstractCrudObject)
return _default_additional_params.get(model_klass, {})
|
1b4c934a06870a8ae1f2f999bab94fb286ee6126
| 3,648,598
|
def thread_loop(run):
"""decorator to make the function run in a loop if it is a thread"""
def fct(self, *args, **kwargs):
if self.use_thread:
while True:
                run(self, *args, **kwargs)
        else:
            run(self, *args, **kwargs)
return fct
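# Illustrative usage sketch (not from the original source): a hypothetical
# sensor class whose decorated run() loops forever only when use_thread is set.
class Sensor:
    def __init__(self, use_thread=False):
        self.use_thread = use_thread

    @thread_loop
    def run(self):
        pass  # poll hardware once per call

Sensor(use_thread=False).run()   # executes a single poll; use_thread=True would loop forever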
|
a68eee708bc0a1fe0a3da01e68ec84b6a43d9210
| 3,648,599
|