| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def home():
"""List devices."""
devices = Device.query.all()
return render_template('devices/home.html', devices=devices)
|
d0f9b14cedf83fbeb35166e1ad9b2de295e2584f
| 3,643,100
|
def translate_value(document_field, form_value):
"""
Given a document_field and a form_value this will translate the value
to the correct result for mongo to use.
"""
value = form_value
if isinstance(document_field, ReferenceField):
value = document_field.document_type.objects.get(id=form_value) if form_value else None
return value
|
5c72764efde00fb4f5093a800706082c1171b5b6
| 3,643,101
|
def error_404(error):
"""Custom 404 Error Page"""
return render_template("error.html", error=error), 404
|
1f1429b8c86ed7a486c498cae6955961f3084ef5
| 3,643,102
|
def sum_num(n1, n2):
    """
    Get the sum of two numbers
    :param n1: first number
    :param n2: second number
    :return: sum of n1 and n2
    """
    return n1 + n2
|
08477e596317f6b8750debd39b5cf0aa56da857c
| 3,643,103
|
def intensity_slice_volume(kernel_code,
image_variables,
g_variables,
blockdim,
bound_box,
vol_dim,
voxel_size,
poses,
out_points=False):
"""
    Function that slices an intensity volume with fan-shaped sections,
    each section defined by a pose of a curvilinear array.
:param kernel_code: CUDA C++ kernel code to compile
:param image_variables: image dimensioning variable list
:param g_variables: All preallocated GPU variables
as described in the preallocation function. A list with
the following indexes:
0 - fan positions in 2D
1 - fan positions in 3D
2 - intensities mapped in fan positions
3 - the target intensity volume
4 - the output images in image space
5 - the 2D fan mask outline
:param blockdim: block dimensions for CUDA kernels
:param bound_box: bounding box of target volume
:param vol_dim: 3D intensity volume dimensions
:param voxel_size: voxel_size of the volume
:param poses: input set of poses
:param out_points: bool to get fan positions or not
:return: positions in 3D, stack of resulting images
"""
# First, compile kernel code with SourceModule
cuda_modules = SourceModule(kernel_code)
# Get image variables from input
fan_parameters = image_variables[0]
slice_dim = image_variables[1]
image_dim = image_variables[2]
pixel_size = image_variables[3]
# Define voxel size for intersection of intensity volume
voxel_size = voxel_size.astype(np.float32)
# Get size of one image, useful to get array of images
im_size = image_dim[0] * image_dim[1]
# Get block and grid dimensions as int
blockdim_x = int(blockdim[0])
blockdim_y = int(blockdim[1])
griddim_x = int(slice_dim[0] / blockdim_x)
griddim_y = int(slice_dim[1] / blockdim_y)
image_num = int(slice_dim[2])
# Convert poses to 1D array to be input in a kernel
pose_array = np.zeros((1, 9 * image_num)).astype(np.float32)
# And an array to offset fan position per image plane
offset_array = np.zeros((1, 3 * image_num)).astype(np.float32)
for p_ind in range(image_num):
pose = poses[:, 4 * p_ind:4 * (p_ind + 1)]
# Allocate the pose
pose_array[0, 9 * p_ind:9 * (p_ind + 1)] = \
np.hstack((pose[0, 0:2], pose[0, 3],
pose[1, 0:2], pose[1, 3],
pose[2, 0:2], pose[2, 3]))
# Allocate the offset
offset_array[0, 3 * p_ind:3 * (p_ind + 1)] = pose[0:3, 1]
# 1-Run position computation kernel, acts on index 0 and 1 of
# the gpu variables, get kernel
transform_kernel = cuda_modules.get_function("transform")
# Then run it
transform_kernel(g_variables[1],
g_variables[0],
drv.In(pose_array),
drv.In(offset_array),
drv.In(fan_parameters),
np.int32(image_num),
block=(blockdim_x, blockdim_y, 3),
grid=(griddim_x, griddim_y, image_num))
# Collect the output to a CPU array
positions_3d = np.empty((1, np.prod(slice_dim) * 3), dtype=np.float32)
# In case points are to be used or visualised (with out_points as True)
if out_points is True:
g_variables[1].get(positions_3d)
positions_3d = positions_3d.reshape([3, np.prod(slice_dim)]).T
# 2-Next step, run slicing kernel, where intensity values are
# placed in the positions. Define volume dimensions
intensity_volume_dims = np.hstack((bound_box[0, :],
vol_dim[0],
vol_dim[1],
vol_dim[2])).astype(np.float32)
# Call kernel from file
slice_kernel = cuda_modules.get_function('weighted_slice')
slice_kernel(g_variables[2],
g_variables[1],
g_variables[3],
drv.In(intensity_volume_dims),
drv.In(voxel_size),
drv.In(slice_dim),
block=(blockdim_x, blockdim_y, 1),
grid=(griddim_x, griddim_y, image_num))
# 3-Map pixels to fan like image
# Define bounds of image output in 2d coordinates as float
image_bounding_box = np.array([-image_dim[0] * pixel_size[0]/2*1000,
0, image_dim[0],
image_dim[1]]).astype(np.float32)
# Allocate output images, the intensity image as a float, and the
# fan outline as an int. These must be in CPU.
intensity_images = np.empty((1, np.prod(image_dim)), dtype=np.float32)
masks = np.empty((1, np.prod(image_dim)), dtype=np.int32)
# Call kernel from file
map_kernel = cuda_modules.get_function('intensity_map_back')
    # Then run it, multiplying coordinate values by 1000 in order
    # to avoid sampling errors
map_kernel(g_variables[4],
g_variables[5],
g_variables[2],
g_variables[0]*1000,
drv.In(slice_dim),
drv.In(image_bounding_box),
drv.In(pixel_size*1000),
block=(blockdim_x, blockdim_y, 1),
grid=(griddim_x, griddim_y, image_num))
# Create a volume with generated images
intensity_image_array = np.zeros((image_dim[1],
image_dim[0],
image_dim[2])).astype(np.float32)
# Gather the results
g_variables[4].get(intensity_images)
g_variables[4].fill(0)
g_variables[5].get(masks)
g_variables[5].fill(0)
for plane in range(image_num):
# Get image and reshape it
current_image = intensity_images[0, im_size*plane:
im_size*(plane+1)]
# Get masks that weight values
current_mask = masks[0, im_size*plane:
im_size*(plane + 1)]
        # Normalise by the number of points added to the image output, using
        # the occurrence counts output by the mask, ignoring divide errors
with np.errstate(divide='ignore'):
current_image = np.divide(current_image, current_mask)
current_image = current_image.reshape(image_dim[0], image_dim[1]).T
# Scale intensities, by setting nan values to minimum
nan_indexes = np.where(np.isnan(current_image))
current_image[nan_indexes] = np.nanmin(current_image)
# Allocate to output
intensity_image_array[:, :, plane] = current_image
# Output a stack of images, where each z-slice has a plane,
# and the corresponding 3D positions
return positions_3d, intensity_image_array
|
c5f64f7ee5a95210a2c5598a7e31e36541fcb320
| 3,643,104
|
def mean_filter(img, kernel_size):
"""take mean value in the neighbourhood of center pixel.
"""
return cv2.blur(img, ksize=kernel_size)
|
58d5684e0691407f6f77d40d5717523eb617dde9
| 3,643,105
|
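A brief usage sketch for the mean_filter entry above, assuming OpenCV and NumPy are installed and the function is in scope; cv2.blur expects ksize as a (width, height) tuple.

import numpy as np

# Hypothetical 8-bit test image (random noise), used purely for illustration.
img = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
smoothed = mean_filter(img, kernel_size=(5, 5))  # 5x5 box filter
print(smoothed.shape)  # (64, 64)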
def main():
"""Return the module instance."""
return AnsibleModule(
argument_spec=dict(
data=dict(default=None),
path=dict(default=None, type=str),
file=dict(default=None, type=str),
)
)
|
846aa9bf9ce23ba7a05aeb91158ad04770b7721e
| 3,643,106
|
from typing import Optional
from pathlib import Path
import pandas as pd
def load_RegNetwork_interactions(
root_dir: Optional[Path] = None,
) -> pd.DataFrame:
"""
Loads RegNetwork interaction datafile. Downloads the file first if not already present.
"""
file = _download_RegNetwork(root_dir)
return pd.read_csv(
file, delimiter="\t", header=None, names=["g1", "id1", "g2", "id2"]
)
|
15571c71ac3bd386518a0f2ec4d293b20394c4b2
| 3,643,107
|
import sys
import json
import re
import requests
def hxlspec_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
""" Run hxlspec with command-line arguments.
Args:
args (list): a list of command-line arguments
stdin (io.IOBase): alternative standard input (mainly for testing)
stdout (io.IOBase): alternative standard output (mainly for testing)
stderr (io.IOBase): alternative standard error (mainly for testing)
"""
def get_json (url_or_filename):
if not url_or_filename:
return json.load(stdin)
if re.match(r'^(?:https?|s?ftp)://', url_or_filename.lower()):
headers = make_headers(args)
response = requests.get(url_or_filename, verify=(not args.ignore_certs), headers=headers)
response.raise_for_status()
return response.json()
else:
with open(url_or_filename, "r") as input:
return json.load(input)
parser = make_args('Process a HXL JSON spec')
args = parser.parse_args(args)
do_common_args(args)
spec = get_json(args.infile)
source = hxl.io.from_spec(spec, allow_local_ok=True)
with make_output(args, stdout) as output:
hxl.io.write_hxl(output.output, source, show_tags=not args.strip_tags)
|
e21e8ba95d91c116dda0ad320cdc9108e6a2a360
| 3,643,108
|
import time
from tqdm import tqdm
import sys
import numpy as np
def search_restaurant(house_list,
filter_by_distance=True,
search_range=3,
sort=True,
top_k=100,
save_csv=False,
offline_save=True):
"""Scraping restaurant information from opentable.com and calculate restaurant score for each house
:param house_list: All of the house information in a list
:param filter_by_distance: Whether to filter restaurants by their distance to each house
:param search_range: The distance range for filtering by distance, IN MILE !!
:param sort: For each restaurant dataframe, whether sort the restaurants by their scores
:param top_k: For each restaurant dataframe, only return restaurants with highest k score
:param save_csv: Whether save each restaurant dataframes as csv file
:param offline_save: Whether save restaurant score of each house in a text file for offline mode
"""
# house_list is the list generated by match_house function
scores = []
house_names = []
rest_dfs = []
csv_path = r"restaurant_csv"
check_path(csv_path)
time.sleep(0.05)
house_gen = tqdm(house_list, desc="Searching for restaurants", file=sys.stdout)
for house in house_gen:
house_latitude, house_longitude = house[1], house[2]
house_name = house[0]
csv_name = "%s_restaurants.csv" % house_name
save_path = csv_path + "/" + csv_name
rest_score, rest_df = generate_restaurant_score(house_latitude, house_longitude, filter_by_distance,
search_range, sort, top_k, save_csv, save_path)
scores.append(rest_score)
house_names.append(house_name)
rest_dfs.append(rest_df)
scores = np.array(scores)
scores = normalize(scores)
# The restaurant part of score for each house with house name is in `rest_scores`
rest_scores = list(zip(house_names, list(scores)))
offline_path = "restaurant_score_offline.txt"
if offline_save:
write_content = ["%s %f" % (house_name, score) for house_name, score in rest_scores]
with open(offline_path, "w") as file:
file.writelines(write_content)
return rest_scores, rest_dfs
|
ba38c6b06e1213b70e96f502e81f03862975df79
| 3,643,109
|
def get_model_config(model, dataset):
"""Map model name to model network configuration."""
if 'cifar10' == dataset.name:
return get_cifar10_model_config(model)
if model == 'vgg11':
mc = vgg_model.Vgg11Model()
elif model == 'vgg16':
mc = vgg_model.Vgg16Model()
elif model == 'vgg19':
mc = vgg_model.Vgg19Model()
elif model == 'lenet':
mc = lenet_model.Lenet5Model()
elif model == 'googlenet':
mc = googlenet_model.GooglenetModel()
elif model == 'overfeat':
mc = overfeat_model.OverfeatModel()
elif model == 'alexnet':
mc = alexnet_model.AlexnetModel()
elif model == 'trivial':
mc = trivial_model.TrivialModel()
elif model == 'inception3':
mc = inception_model.Inceptionv3Model()
elif model == 'inception4':
mc = inception_model.Inceptionv4Model()
elif model == 'resnet50' or model == 'resnet50_v2':
mc = resnet_model.ResnetModel(model, (3, 4, 6, 3))
elif model == 'resnet101' or model == 'resnet101_v2':
mc = resnet_model.ResnetModel(model, (3, 4, 23, 3))
elif model == 'resnet152' or model == 'resnet152_v2':
mc = resnet_model.ResnetModel(model, (3, 8, 36, 3))
else:
raise KeyError('Invalid model name \'%s\' for dataset \'%s\'' %
(model, dataset.name))
return mc
|
eb3da4fa2e7308fe0b7394b6c654e171abaf2363
| 3,643,110
|
from datetime import datetime, timezone
def utc_now():
    """Return the current UTC timestamp as an integer number of seconds
    """
    now = datetime.now(timezone.utc)
    return int(now.timestamp())
|
35edc0e19f236263a8f2efd0fa9be81663042484
| 3,643,111
|
def rrc_filter(alpha, length, osFactor, plot=False):
"""
Generates the impulse response of a root raised cosine filter.
Args:
alpha (float): Filter roll-off factor.
length (int): Number of symbols to use in the filter.
osFactor (int): Oversampling factor (number of samples per symbol).
plot (bool): Enable or disable plotting of filter impulse response.
Returns:
(NumPy array): Filter coefficients for use in np.convolve.
"""
if alpha < 0 or alpha > 1.0:
raise error.WfmBuilderError('Invalid \'alpha\' chosen. Use something between 0.1 and 1.')
filterOrder = length * osFactor
# Make GOOD and sure that filterOrder is an integer value
filterOrder = round(filterOrder)
if filterOrder % 2:
raise error.WfmBuilderError('Must use an even number of filter taps.')
delay = filterOrder / 2
t = np.arange(-delay, delay) / osFactor
# Calculate the impulse response without warning about the inevitable divide by zero operations
# I promise we will deal with those down the road
with np.errstate(divide='ignore', invalid='ignore'):
h = -4 * alpha / osFactor * (np.cos((1 + alpha) * np.pi * t) +
np.sin((1 - alpha) * np.pi * t) / (4 * alpha * t)) / (np.pi * ((4 * alpha * t) ** 2 - 1))
# Find middle point of filter and manually populate the value
# np.where returns a list of indices where the argument condition is True in an array. Nice.
idx0 = np.where(t == 0)
h[idx0] = -1 / (np.pi * osFactor) * (np.pi * (alpha - 1) - 4 * alpha)
# Define machine precision used to check for near-zero values for small-number arithmetic
eps = np.finfo(float).eps
# Find locations of divide by zero points
divZero = abs(abs(4 * alpha * t) - 1)
# np.where returns a list of indices where the argument condition is True. Nice.
idx1 = np.where(divZero < np.sqrt(eps))
# Manually populate divide by zero points
h[idx1] = 1 / (2 * np.pi * osFactor) * (np.pi * (alpha + 1) * np.sin(np.pi * (alpha + 1) /
(4 * alpha)) - 4 * alpha * np.sin(np.pi * (alpha - 1) /
(4 * alpha)) + np.pi * (alpha - 1) * np.cos(np.pi * (alpha - 1) / (4 * alpha)))
# Normalize filter energy to 1
h = h / np.sqrt(np.sum(h ** 2))
if plot:
plt.plot(t, h)
plt.title('Filter Impulse Response')
plt.ylabel('h(t)')
plt.xlabel('t')
plt.show()
return h
|
9fc5c916e646179ac465fb2d3d897d4dadadd9de
| 3,643,112
|
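A hedged usage sketch for the rrc_filter entry above, pulse-shaping a random BPSK symbol stream with the generated taps; it assumes NumPy is imported in the defining module and that the function is in scope.

import numpy as np

taps = rrc_filter(alpha=0.35, length=8, osFactor=4, plot=False)  # 32 taps (even count)

# Hypothetical random BPSK symbols, upsampled by 4 with zero insertion.
symbols = 2 * np.random.randint(0, 2, 64) - 1
upsampled = np.zeros(len(symbols) * 4)
upsampled[::4] = symbols

# Use the coefficients with np.convolve, as suggested by the docstring.
shaped = np.convolve(upsampled, taps)
print(taps.size, shaped.size)  # 32 287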
def get_available_services(project_dir: str):
"""Get standard services bundled with stakkr."""
services_dir = file_utils.get_dir('static') + '/services/'
conf_files = _get_services_from_dir(services_dir)
services = dict()
for conf_file in conf_files:
services[conf_file[:-4]] = services_dir + conf_file
services = _add_local_services(project_dir, services)
return services
|
b361eaefd0772ca9bcc75274f19e7550b02d1484
| 3,643,113
|
def build_insert(table, to_insert):
"""
Build an insert request.
Parameters
----------
table : str
Table where query will be directed.
to_insert: iterable
The list of columns where the values will be inserted.
Returns
-------
str
Built query.
"""
sql_q = 'INSERT INTO \"' + table + '\" ('
sql_q += ', '.join('{0}'.format(w) for w in to_insert)
sql_q += ') VALUES ('
sql_q += ', '.join(':{0}'.format(w) for w in to_insert)
sql_q += ')'
return sql_q
|
cf2e72c57e5502660ed3dcade6885076ff8c2014
| 3,643,114
|
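A quick illustration of the statement produced by build_insert above, assuming the function is in scope; the :column placeholders match sqlite3's named parameter style.

import sqlite3

query = build_insert('users', ['name', 'age'])
print(query)  # INSERT INTO "users" (name, age) VALUES (:name, :age)

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE "users" (name TEXT, age INTEGER)')
conn.execute(query, {'name': 'ada', 'age': 36})
print(conn.execute('SELECT * FROM "users"').fetchall())  # [('ada', 36)]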
from pathlib import Path
import json
def get_reference_data(fname):
"""
Load JSON reference data.
:param fname: Filename without extension.
:type fname: str
"""
base_dir = Path(__file__).resolve().parent
fpath = base_dir.joinpath('reference', 'data', fname + '.json')
with fpath.open() as f:
return json.load(f)
|
73880586393ce9463a356d69880f2f285058637f
| 3,643,115
|
def _is_l10n_ch_isr_issuer(account_ref, currency_code):
""" Returns True if the string account_ref is a valid a valid ISR issuer
An ISR issuer is postal account number that starts by 01 (CHF) or 03 (EUR),
"""
if (account_ref or '').startswith(ISR_SUBSCRIPTION_CODE[currency_code]):
return _is_l10n_ch_postal(account_ref)
return False
|
5709d8f67aefe9b9faac6f4541f8a050eb95c82f
| 3,643,116
|
import struct
def little_endian_uint32(i):
"""Return the 32 bit unsigned integer little-endian representation of i"""
s = struct.pack('<I', i)
return struct.unpack('=I', s)[0]
|
07f72baaf8f7143c732fd5b9e56b0b7d02d531bd
| 3,643,117
|
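A short check of little_endian_uint32 above, assuming it is in scope; the result depends on the host byte order.

import sys

print(sys.byteorder)
print(hex(little_endian_uint32(0x12345678)))
# little-endian host: 0x12345678; big-endian host: 0x78563412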
import numpy as np
from sklearn.metrics import roc_curve, auc
def evaluate_scores(scores_ID, scores_OOD):
"""calculates classification performance (ROCAUC, FPR@TPR95) based on lists of scores
Returns:
ROCAUC, fpr95
"""
labels_in = np.ones(scores_ID.shape)
labels_out = np.zeros(scores_OOD.shape)
y = np.concatenate([labels_in, labels_out])
score = np.concatenate([scores_ID, scores_OOD])
fpr, tpr, _ = roc_curve(y, score)
roc_auc = auc(fpr, tpr)
ii=np.where(tpr>0.95)[0][0]
return roc_auc, fpr[ii]
|
f88a67f09496700ab783a3e91347b085767a2228
| 3,643,118
|
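A hedged usage sketch for evaluate_scores above with synthetic scores, assuming NumPy and scikit-learn are available and the function is in scope; in-distribution scores are drawn higher than out-of-distribution ones, so the ROCAUC should be well above 0.5.

import numpy as np

rng = np.random.default_rng(0)
scores_id = rng.normal(loc=2.0, size=1000)   # in-distribution scores (higher)
scores_ood = rng.normal(loc=0.0, size=1000)  # out-of-distribution scores (lower)

roc_auc, fpr95 = evaluate_scores(scores_id, scores_ood)
print(round(roc_auc, 3), round(fpr95, 3))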
from typing import Union
def check_cardinality(attribute_name: str,
analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether the cardinality exceeds the predefined threshold
Args:
attribute_name: (string),
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of cardinality
Returns:
Union[None, string]
"""
metrics = analysis.smetrics
cardinality = 0
for item in metrics:
if item.name == run_metadata_pb2.ScalarMetric.CARDINALITY:
cardinality = item.value
if cardinality > CARDINALITY_THRESHOLD:
return template.HIGH_CARDINALITY.format(
name=attribute_name,
value=cardinality
)
return None
|
8a9b9e0c709b64273a2120100730992276b52b46
| 3,643,119
|
def create_df_from(dataset):
"""
    Selects a method, based on the given dataset name, and creates the corresponding dataframe.
    When adding a new method, make sure the index is the ASN and the column names follow the
    format "dataset_name_"+"column_name" (e.g., the column "X" from the dataset "setA" should be "setA_X").
:param dataset: (type = string) name of the dataset to be loaded
:return: A dataframe with indexes the ASNs and columns the features loaded from the given dataset
"""
if dataset == 'AS_rank':
data = create_df_from_AS_rank()
elif dataset == 'personal':
data = create_df_from_personal()
elif dataset == 'PeeringDB':
data = create_df_from_PeeringDB()
elif dataset == 'AS_hegemony':
data = create_df_from_AS_hegemony()
elif dataset == 'Atlas_probes':
data = create_df_from_Atlas_probes()
else:
raise Exception('Not defined dataset')
return data
|
3a5e6f1a9aa510ec19c6eeb1af8a89574b938ea1
| 3,643,120
|
def plasma_fractal(mapsize=256, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats normalised to the range 0-1.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
|
f3b0c65f7bff6526a91c8d398a430a72cf744421
| 3,643,121
|
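A minimal usage sketch of plasma_fractal above (a diamond-square heightmap, as used e.g. for fog-style image corruptions), assuming NumPy is imported in the defining module and the function is in scope.

import numpy as np

heightmap = plasma_fractal(mapsize=256, wibbledecay=3)  # mapsize must be a power of two
print(heightmap.shape, heightmap.min(), heightmap.max())  # (256, 256) 0.0 1.0

# Hypothetical fog-like blend with a grayscale image in [0, 1] of the same size.
img = np.random.rand(256, 256)
fogged = np.clip(img + 0.5 * heightmap, 0, 1)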
def reduce_puzzle(values):
"""Reduce a Sudoku puzzle by repeatedly applying all constraint strategies
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict or False
The values dictionary after continued application of the constraint strategies
no longer produces any changes, or False if the puzzle is unsolvable
"""
# TODO: Copy your code from the classroom and modify it to complete this function
stalled = False
unsolved_boxes = lambda values: len([b for b in boxes if len(values[b]) > 1])
unsolved_boxes_before = unsolved_boxes(values)
while not stalled:
values = eliminate(values)
unsolved_boxes_after = unsolved_boxes(values)
if unsolved_boxes_after == 0:
stalled = True
values = only_choice(values)
unsolved_boxes_after = unsolved_boxes(values)
if unsolved_boxes_after == 0:
stalled = True
values = naked_twins(values)
unsolved_boxes_after = unsolved_boxes(values)
if unsolved_boxes_after == 0:
stalled = True
        # Make sure you stop when you're stuck
if unsolved_boxes_after == unsolved_boxes_before:
stalled = True
# Catch unsolvable cases
if any(len(v) == 0 for v in values.values()):
return False
# Update number of unsolved boxes
unsolved_boxes_before = unsolved_boxes_after
return values
|
36665a4f933a77ce7d472e02fbafaf81beda6cad
| 3,643,122
|
import pickle
def read_ids():
"""
Reads the content from a file as a tuple and returns the tuple
:return: node_id, pool_id (or False if no file)
"""
if not const.MEMORY_FILE.exists():
return False
with open(const.MEMORY_FILE, 'rb') as f:
data = pickle.load(f)
assert type(data) is tuple and len(data) == 2
node_id, pool_id = data
return node_id, pool_id
|
89606543b149cac636765a6f3e2aef34f2adc38b
| 3,643,123
|
from pymbolic.primitives import Call
def _match_caller_callee_argument_dimension_(program, callee_function_name):
"""
Returns a copy of *program* with the instance of
:class:`loopy.kernel.function_interface.CallableKernel` addressed by
*callee_function_name* in the *program* aligned with the argument
dimensions required by *caller_knl*.
.. note::
The callee kernel addressed by *callee_function_name*, should be
called at only one location throughout the program, as multiple
invocations would demand complex renaming logic which is not
implemented yet.
"""
assert isinstance(program, TranslationUnit)
assert isinstance(callee_function_name, str)
assert callee_function_name not in program.entrypoints
assert callee_function_name in program.callables_table
is_invoking_callee = _FunctionCalledChecker(
callee_function_name).map_kernel
caller_knl, = [in_knl_callable.subkernel for in_knl_callable in
program.callables_table.values() if isinstance(in_knl_callable,
CallableKernel) and
is_invoking_callee(in_knl_callable.subkernel)]
assert len([insn for insn in caller_knl.instructions if (isinstance(insn,
CallInstruction) and isinstance(insn.expression, Call) and
insn.expression.function.name == callee_function_name)]) == 1
new_callee_kernel = _match_caller_callee_argument_dimension_for_single_kernel(
caller_knl, program[callee_function_name])
return program.with_kernel(new_callee_kernel)
|
7c37a20776e1ff551dca3f2acd1b36e47cf6b06e
| 3,643,124
|
def new_automation_jobs(issues):
"""
:param issues: issues object pulled from Redmine API
:return: returns a new subset of issues that are Status: NEW and match a term in AUTOMATOR_KEYWORDS)
"""
new_jobs = {}
for issue in issues:
# Only new issues
if issue.status.name == 'New':
# Strip whitespace and make lowercase ('subject' is the job type i.e. Diversitree)
subject = issue.subject.lower().replace(' ', '')
# Check for presence of an automator keyword in subject line
if subject == 'iridaretrieve':
new_jobs[issue] = subject
return new_jobs
|
74c9c96aeeea1d15384d617c266daa4d49f3a203
| 3,643,125
|
import os
def resolve_test_data_path(test_data_file):
"""
helper function to ensure filepath is valid
for different testing context (setuptools, directly, etc.)
:param test_data_file: Relative path to an input file.
:returns: Full path to the input file.
"""
if os.path.exists(test_data_file):
return test_data_file
else:
path = os.path.join('woudc_data_registry', 'tests', test_data_file)
if os.path.exists(path):
return path
|
d124bcbc36b48fd6572697c9a5211f794c3dce19
| 3,643,126
|
def make_data(revs, word_idx_map, max_l=50, filter_h=3, val_test_splits=[2, 3], validation_num=500000):
"""
Transforms sentences into a 2-d matrix.
"""
version = begin_time()
train, val, test = [], [], []
for rev in revs:
sent = get_idx_from_sent_msg(rev["m"], word_idx_map, max_l, True)
sent += get_idx_from_sent(rev["r"], word_idx_map, max_l, True)
sent += get_session_mask(rev["m"])
sent.append(int(rev["y"]))
if len(val) >= validation_num:
train.append(sent)
else:
val.append(sent)
train = np.array(train, dtype="int")
val = np.array(val, dtype="int")
test = np.array(test, dtype="int")
    print('training data', len(train), 'val data',
          len(val), 'spend time:', spend_time(version))
return [train, val, test]
|
b141297c0ef8d2eeb2c6c62e00924f5e64ffe266
| 3,643,127
|
def init(param_test):
"""
Initialize class: param_test
"""
# initialization
param_test.default_args_values = {'di': 6.85, 'da': 7.65, 'db': 7.02}
default_args = ['-di 6.85 -da 7.65 -db 7.02'] # default parameters
param_test.default_result = 6.612133606
# assign default params
if not param_test.args:
param_test.args = default_args
return param_test
|
d86cd246d4beb5aa267d222bb12f9637f001032d
| 3,643,128
|
def add_width_to_df(df):
"""Adds an extra column "width" to df which is the angular width of the CME
in degrees.
"""
df = add_helcats_to_df(df, 'PA-N [deg]')
df = add_helcats_to_df(df, 'PA-S [deg]')
df = add_col_to_df(df, 'PA-N [deg]', 'PA-S [deg]', 'subtract', 'width', abs_col=True)
return df
|
ea866d161ca77d9d78f04fb613aa1ed8631566b2
| 3,643,129
|
def checkSeconds(seconds, timestamp):
""" Return a string depending on the value of seconds
If the block is mined since one hour ago, return timestamp
"""
if 3600 > seconds > 60:
minute = int(seconds / 60)
if minute == 1:
return '{} minute ago'.format(minute)
return '{} minutes ago'.format(minute)
else:
return 'Since {} sec'.format(seconds)
|
2d07657a14300793a116d28e7c9495ae4a1b61ed
| 3,643,130
|
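Example calls for checkSeconds above, assuming it is in scope; the timestamp argument is currently unused by the function and is shown here only as a hypothetical placeholder.

ts = "2022-02-19 01:51:16"  # hypothetical timestamp string, not used by checkSeconds

print(checkSeconds(42, ts))    # Since 42 sec
print(checkSeconds(90, ts))    # 1 minute ago
print(checkSeconds(150, ts))   # 2 minutes ago
print(checkSeconds(4000, ts))  # Since 4000 sec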
def get_netrange_end(asn_cidr):
    """
    :param str asn_cidr: ASN CIDR
    :return: ipv4 address of last IP in netrange
    :rtype: str
    """
    try:
        last_in_netrange = \
            ip2long(str(ipcalc.Network(asn_cidr).host_first())) + \
            ipcalc.Network(asn_cidr).size() - 2
    except ValueError as error:
        print('Issue calculating size of %s network' % asn_cidr)
        raise error
    return socket.inet_ntoa(struct.pack('!L', last_in_netrange))
|
51305dc1540bbc0a361452a80d6732b1eb039fd4
| 3,643,131
|
def load_from_file(filepath, column_offset=0, prefix='', safe_urls=False, delimiter=r'\s+'):
"""
Load target entities and their labels if exist from a file.
:param filepath: Path to the target entities
:param column_offset: offset to the entities column (optional).
:param prefix: URI prefix (Ex: https://yago-expr.org) if the data lacks one.
(needed when using rdflib and/or virtouso) (optional)
:param safe_urls: Encode URIs if they are not safe for rdflib, eg. contains '(' or special chars (optional)
:param delimiter: splitting delimiter in the file (optional)
:return: EntityLabelsInterface object to access the entities and their labels and also to use them as triples.
:rtype: EntitiesLabelsFile
"""
return EntitiesLabelsFile(filepath, column_offset=column_offset, prefix=prefix, safe_urls=safe_urls, delimiter=delimiter)
|
2aa9b286e25c6e93a06afb927f7e0ad345208afb
| 3,643,132
|
def create_app(*, config_object: Config) -> connexion.App:
"""Create app instance."""
connexion_app = connexion.App(
__name__, debug=config_object.DEBUG, specification_dir="spec/"
)
flask_app = connexion_app.app
flask_app.config.from_object(config_object)
connexion_app.add_api("api.yaml")
return connexion_app
|
2d5f698be823f18075bd21a8ffb92fa9b14d9a78
| 3,643,133
|
from typing import Any
from typing import Union
import os
import zipimport
import zipfile
def load_plugins(descr: str, package: str, plugin_class: Any,
specs: TList[TDict[str, Any]] = None) -> \
TDict[Union[str, int], Any]:
"""
Load and initialize plugins from the given directory
:param descr: plugin description
:param package: plugin package name relative to afterglow_core, e.g.
"resources.data_provider_plugins"
:param plugin_class: base plugin class
:param specs: list of plugin specifications: [{"name": "plugin_name",
"param": value, ...}, ...]; parameters are used to construct the plugin
class; this can be the value of the corresponding option in app config,
e.g. DATA_PROVIDERS; if omitted or None, load all available plugins
without passing any parameters on initialization (suitable e.g. for the
jobs)
:return: dictionary containing plugin class instances indexed by their
unique IDs (both as integers and strings)
"""
if not specs and specs is not None:
# No plugins of this type are required
return {}
directory = os.path.normpath(os.path.join(
os.path.dirname(__file__), package.replace('.', os.path.sep)))
app.logger.debug('Looking for %s plugins in %s', descr, directory)
# Search for modules within the specified directory
# noinspection PyBroadException
try:
# py2exe/freeze support
if not isinstance(__loader__, zipimport.zipimporter):
raise Exception()
archive = zipfile.ZipFile(__loader__.archive)
try:
dirlist = [name for name in archive.namelist()
if name.startswith(directory.replace('\\', '/'))]
finally:
archive.close()
except Exception:
# Normal installation
# noinspection PyBroadException
try:
dirlist = os.listdir(directory)
except Exception:
dirlist = []
dirlist = [os.path.split(name)[1] for name in dirlist]
plugin_classes = {}
for name in {os.path.splitext(f)[0] for f in dirlist
if os.path.splitext(f)[1] in PY_SUFFIXES and
os.path.splitext(f)[0] != '__init__'}:
# noinspection PyBroadException
try:
app.logger.debug('Checking module "%s"', name)
# A potential plugin module is found; load it
m = __import__(
'afterglow_core.' + package + '.' + name, globals(), locals(),
['__dict__'])
try:
# Check only names listed in __all__
items = (m.__dict__[_name] for _name in m.__dict__['__all__'])
except KeyError:
# If no __all__ is present in the module, check all globals
items = m.__dict__.values()
# Scan all items defined in the module, looking for classes
# derived from "plugin_class"
for item in items:
try:
if issubclass(item, plugin_class) and \
item is not plugin_class and \
getattr(item, '__polymorphic_on__', None) and \
hasattr(item, item.__polymorphic_on__) and \
isinstance(getattr(item, item.__polymorphic_on__),
str) and \
item.__module__ == m.__name__:
plugin_classes[getattr(item,
item.__polymorphic_on__)] = item
app.logger.debug(
'Found %s plugin "%s"', descr,
getattr(item, item.__polymorphic_on__))
except TypeError:
pass
except Exception:
# Ignore modules that could not be imported
app.logger.debug(
'Could not import module "%s"', name, exc_info=True)
plugins = {}
if specs is None:
# Initialize all available plugins without any options
for name, klass in plugin_classes.items():
# Initialize plugin instance; provide the polymorphic field equal
# to plugin name to instantiate the appropriate subclass instead
# of the base plugin class
try:
instance = klass(
_set_defaults=True,
**{klass.__polymorphic_on__:
getattr(klass, klass.__polymorphic_on__)})
except Exception:
app.logger.exception(
'Error loading %s plugin "%s"', descr, name)
raise
add_plugin(plugins, descr, instance)
else:
# Instantiate only the given plugins using the specified display names
# and options
for id, spec in enumerate(specs):
try:
name = spec.pop('name')
except (TypeError, KeyError):
raise RuntimeError(
'Missing name in {} plugin spec ({})'.format(descr, spec))
try:
klass = plugin_classes[name]
except KeyError:
raise RuntimeError(
'Unknown {} plugin "{}"'.format(descr, name))
# Initialize plugin instance using the provided parameters
try:
instance = klass(**spec)
except Exception:
app.logger.exception(
'Error loading %s plugin "%s" with options %s',
descr, name, spec)
raise
add_plugin(plugins, descr, instance, id)
return plugins
|
62ec68740ffc30e478cb6bad2365b6ead2f2a46c
| 3,643,134
|
def _get_repos_info(db: Session, user_id: int):
"""Returns data for all starred repositories for a user.
The return is in a good format for the frontend.
Args:
db (Session): sqlAlchemy connection object
user_id (int): User id
Returns:
list[Repository(dict)]:repo_info = {
"id": (int),
"github_repo_id": (int),
"name": (str),
"description": (str),
"html_url": (str),
"tags": list[dict]
}
"""
repos = _get_repos_in_db(db=db, user_id=user_id,
only_starred_repos=True)
list_of_repos = []
for repo in repos:
repo_info = {
"id": repo.id,
"github_repo_id": repo.github_repo_id,
"name": repo.name,
"description": repo.description,
"html_url": repo.html_url,
"tags": _get_all_tags_in_repo(repo_id=repo.id, db=db)
}
list_of_repos.append(repo_info)
return list_of_repos
|
78a126369355c1c76fc6a1b673b365e3423cd011
| 3,643,135
|
def ultimate_oscillator(close_data, low_data):
"""
Ultimate Oscillator.
Formula:
UO = 100 * ((4 * AVG7) + (2 * AVG14) + AVG28) / (4 + 2 + 1)
"""
a7 = 4 * average_7(close_data, low_data)
a14 = 2 * average_14(close_data, low_data)
a28 = average_28(close_data, low_data)
uo = 100 * ((a7 + a14 + a28) / 7)
return uo
|
9803eda656cdb9dd49621a93785b55cf5bc15e7c
| 3,643,136
|
import hashlib
import os
def generate_csrf(request: StarletteRequest,
secret_key: str,
field_name: str):
"""Generate a new token, store it in the session and return a time-signed
token. If a token is already present in the session, it will be used to
generate a new time signed token. The time-signed token is cached per
request so multiple calls to this function will return the same time-signed
token.
Args:
request (:class:`starlette.requests.Request`): The request instance.
secret_key (str): The signing key.
field_name (str): Where the token is stored in the session.
Returns:
str: The time-signed token
"""
if not hasattr(request.state, field_name):
# handle Secret instances
if isinstance(secret_key, Secret):
secret_key = str(secret_key)
s = URLSafeTimedSerializer(secret_key, salt='wtf-csrf-token')
session = request.session
# get/set token in session
if field_name not in session:
session[field_name] = hashlib.sha1(os.urandom(64)).hexdigest()
try:
token = s.dumps(session[field_name])
except TypeError:
session[field_name] = hashlib.sha1(os.urandom(64)).hexdigest()
token = s.dumps(session[field_name])
setattr(request.state, field_name, token)
return getattr(request.state, field_name)
|
a79bcd6640112db3729a160689c00e6f24677398
| 3,643,137
|
def get_completions():
"""
Returns the global completion list.
"""
return completionList
|
901718ea73b5328c277c357ecac859b40518890d
| 3,643,138
|
def breadth_first_search():
"""
BFS Algorithm
"""
initial_state = State(3, 3, "left", 0, 0)
if initial_state.is_goal():
return initial_state
frontier = list()
explored = set()
frontier.append(initial_state)
while frontier:
state = frontier.pop(0)
if state.is_goal():
return state
explored.add(state)
children = successors(state)
for child in children:
            if (child not in explored) and (child not in frontier):
frontier.append(child)
return None
|
2c0dca233b2bdb4474dd17ee7386ed15f5af44c1
| 3,643,139
|
import pandas
import numpy as np
def rankSimilarity(df, top = True, rank = 3):
""" Returns the most similar documents or least similar documents
args:
df (pandas.Dataframe): row, col = documents, value = boolean similarity
top (boolean): True: most, False: least (default = True)
rank (int): number of top or bottom (default = 3)
returns:
pandas.Dataframe: row =rank, columns = indices, names, value
"""
df2 = df.copy(deep = True)
    df_np = df2.to_numpy()
if top:
np.fill_diagonal(df_np, -1)
results_dic = {"indices": [], "names": [], "value": [] }
for n in range(rank):
if top:
indices = np.unravel_index(df_np.argmax(), df_np.shape) # returns indices of first max found
# np.where(df_np == df_np.max()) # will return all indices of maxs
else:
indices = np.unravel_index(df_np.argmin(), df_np.shape) # returns indices of first min found
# np.where(df_np == df_np.min()) # will return all indices of mins
results_dic["indices"].append(indices)
results_dic["names"].append((df.index[indices[0]], df.index[indices[1]]))
results_dic["value"].append(df.iloc[indices])
if top:
df_np[indices[0],indices[1]] = -1 # set to -1 to find the next max
df_np[indices[1],indices[0]] = -1 # because symmetric
else:
df_np[indices[0],indices[1]] = 1 # set to 1 to find the next min
df_np[indices[1],indices[0]] = 1 # because symmetric
df_result = pandas.DataFrame(results_dic, index = range(1,rank+1))
df_result.index.name = "rank"
return df_result
|
7ae5a90ced7dbbd79d5f296a6f31f1236384ba7a
| 3,643,140
|
def change_controller(move_group, second_try=False):
"""
Changes between motor controllers
move_group -> Name of required move group.
"""
global list_controllers_service
global switch_controllers_service
controller_map = {
'gripper': 'cartesian_motor_controller',
'whole_arm': 'cartesian_motor_controller',
'realsense': 'cartesian_motor_controller_realsense',
'sucker': 'cartesian_motor_controller_sucker',
'wrist_only': 'cartesian_motor_controller_wrist'
}
rospy.loginfo('SWITCHING CONTROLLERS')
if move_group not in controller_map:
rospy.logerr('%s is not a valid move group for switching controllers' % move_group)
return False
wanted_controller = controller_map[move_group]
c_list = list_controllers_service.call()
running_controllers = []
for c in c_list.controller:
if c.name == 'joint_state_controller':
continue
if c.name == wanted_controller and c.state == 'running':
rospy.loginfo('Controller %s is already running' % wanted_controller)
return True
if c.state == 'running':
running_controllers.append(c.name)
controllerSwitch = cmsv.SwitchControllerRequest()
controllerSwitch.strictness = 1
controllerSwitch.start_controllers = [wanted_controller]
controllerSwitch.stop_controllers = running_controllers
# Return True if controller was successfully switched
res = switch_controllers_service(controllerSwitch).ok
if res:
rospy.loginfo('Successfully switched controllers for move group %s' % move_group)
return res
elif second_try == False:
rospy.logerr('Failed to switch controllers for move group %s' % move_group)
rospy.sleep(1.0)
return change_controller(move_group, True)
else:
return False
|
8521e1c2967368a5c8ac956fc26d4da879919a2d
| 3,643,141
|
import base64
def base64_encode(text):
"""<string> -- Encode <string> with base64."""
return base64.b64encode(text.encode()).decode()
|
ce837abde42e9a00268e14cfbd2bd4fd3cf16208
| 3,643,142
|
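A one-line round trip for base64_encode above, assuming it is in scope.

import base64

print(base64_encode("hello"))  # aGVsbG8=
assert base64.b64decode(base64_encode("hello")).decode() == "hello"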
def _signed_bin(n):
"""Transform n into an optimized signed binary representation"""
r = []
while n > 1:
if n & 1:
cp = _gbd(n + 1)
cn = _gbd(n - 1)
if cp > cn: # -1 leaves more zeroes -> subtract -1 (= +1)
r.append(-1)
n += 1
else: # +1 leaves more zeroes -> subtract +1 (= -1)
r.append(+1)
n -= 1
else:
r.append(0) # be glad about one more zero
n >>= 1
r.append(n)
return r[::-1]
|
5f9f57e02942264901f6523962b21d1c36accdb2
| 3,643,143
|
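The _signed_bin entry above relies on a helper _gbd that is not shown; a plausible reading is "greatest base-2 divisor", i.e. the number of trailing zero bits, which is what makes the "leaves more zeroes" comparison meaningful. A minimal sketch under that assumption follows; the helper in the original source may differ.

def _gbd(n):
    """Assumed helper: count of trailing zero bits of n (greatest base-2 divisor)."""
    count = 0
    while n > 0 and not n & 1:
        n >>= 1
        count += 1
    return count

print(_signed_bin(7), _signed_bin(13))  # signed digits, most-significant first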
import os
def load_tests(loader, tests, pattern):
"""Provide a TestSuite to the discovery process."""
test_dir = os.path.join(os.path.dirname(__file__), name)
return driver.build_tests(test_dir, loader,
host=data['host'],
port=data['port'],
prefix=data['prefix'],
fixture_module=fixtures)
|
75d15dc8fef7f04cbcc56235d8318ad1b4a31928
| 3,643,144
|
def powderfit(powder, scans=None, peaks=None, ki=None, dmono=3.355,
spacegroup=1):
"""Fit powder peaks of a powder sample to calibrate instrument wavelength.
First argument is either a string that names a known material (currently
only ``'YIG'`` is available) or a cubic lattice parameter. Then you need
to give either scan numbers (*scans*) or peak positions (*peaks*) and a
neutron wavevector (*ki*). Examples:
>>> powderfit('YIG', scans=[1382, 1383, 1384, ...])
>>> powderfit(12.377932, peaks=[45.396, 61.344, 66.096, ...], ki=1.4)
As a further argument, *dmono* is the lattice constant of the monochromator
(only used to calculate monochromator 2-theta offsets), it defaults to PG
(3.355 A).
"""
maxhkl = 10 # max H/K/L to consider when looking for d-values
maxdd = 0.2 # max distance in d-value when looking for peak indices
ksteps = 50 # steps with different ki
dki = 0.002 # relative ki stepsize
if powder == 'YIG':
a = 12.377932
spacegroup = 230
session.log.info('YIG: using cubic lattice constant of %.6f A', a)
session.log.info('')
else:
if not isinstance(powder, float):
raise UsageError('first argument must be either "YIG" or a '
'lattice constant')
a = powder
sg = get_spacegroup(spacegroup)
# calculate (possible) d-values
# loop through some hkl-sets, also consider higher harmonics...
dhkls = {}
for h in range(maxhkl):
for k in range(maxhkl):
for l in range(maxhkl):
if h + k + l > 0: # assume all reflections are possible
if not can_reflect(sg, h, k, l):
continue
G = sqrt(h*h + k*k + l*l)
dhkls[a/G] = '(%d %d %d)' % (h, k, l)
dhkls[a/(2*G)] = '(%d %d %d)/2' % (h, k, l)
dhkls[a/(3*G)] = '(%d %d %d)/3' % (h, k, l)
dhkls[a/(4*G)] = '(%d %d %d)/4' % (h, k, l)
dhkls[a/(5*G)] = '(%d %d %d)/5' % (h, k, l)
# generate list from dict
dvals = sorted(dhkls)
# fit and helper functions
def dk2tt(d, k):
return 2.0 * degrees(arcsin(pi/(d * k)))
def model(x, k, stt0):
return stt0 + dk2tt(x, k)
data = {}
if not peaks:
if not scans:
raise UsageError('please give either scans or peaks argument')
for dataset in session.experiment.data.getLastScans():
num = dataset.counter
if num not in scans:
continue
res = _extract_powder_data(num, dataset)
session.log.debug('powder_data from %d: %s', num, res)
if res:
ki, peaks = res
data.setdefault(ki, []).extend([None, p, dp, '#%d ' % num]
for (p, dp) in peaks)
if not data:
session.log.warning('no data found, check the scan numbers!')
return
else:
if scans:
raise UsageError('please give either scans or peaks argument')
if not ki:
raise UsageError('please give ki argument together with peaks')
data[float(ki)] = [[None, p, 0.1, ''] for p in peaks]
beststt0s = []
bestmtt0s = []
bestrms = 1.0
bestlines = []
orig_data = data
for j in [0] + [i * s for i in range(1, ksteps) for s in (-1, 1)]:
out = []
p = out.append
data = deepcopy(orig_data)
# now iterate through data (for all ki and for all peaks) and try to
# assign a d-value assuming the ki not to be completely off!
for ki1 in sorted(data):
new_ki = ki1 + j*dki*ki1
# iterate over ki specific list, start at last element
for el in reversed(data[ki1]):
tdval = pi/new_ki/sin(abs(radians(el[1]/2.))) # dvalue from scan
distances = [(abs(d-tdval), i) for (i, d) in enumerate(dvals)]
mindist = min(distances)
if mindist[0] > maxdd:
p('%speak at %7.3f -> no hkl found' % (el[3], el[1]))
data[ki1].remove(el)
else:
el[0] = dvals[mindist[1]]
if el[1] < 0:
el[0] *= -1
p('%speak at %7.3f could be %s at d = %-7.4f' %
(el[3], el[1], dhkls[abs(el[0])], el[0]))
p('')
restxt = []
restxt.append('___final_results___')
restxt.append('ki_exp #peaks | ki_fit dki_fit mtt_0 lambda | '
'stt_0 dstt_0 | chisqr')
stt0s = []
mtt0s = []
rms = 0
for ki1 in sorted(data):
new_ki = ki1 + j*dki*ki1
peaks = data[ki1]
failed = True
if len(peaks) > 2:
fit = Fit('ki', model, ['ki', 'stt0'], [new_ki, 0])
res = fit.run([el[0] for el in peaks], [el[1] for el in peaks],
[el[2] for el in peaks])
failed = res._failed
if failed:
restxt.append('%4.3f %-6d | No fit!' % (ki1, len(peaks)))
rms += 1e6
continue
mtt0 = dk2tt(dmono, res.ki) - dk2tt(dmono, ki1)
restxt.append('%5.3f %-6d | %-7.4f %-7.4f %-7.4f %-7.4f | '
'%-7.4f %-7.4f | %.2f' %
(ki1, len(peaks), res.ki, res.dki, mtt0, 2*pi/res.ki,
res.stt0, res.dstt0, res.chi2))
stt0s.append(res.stt0)
mtt0s.append(mtt0)
peaks_fit = [model(el[0], res.ki, res.stt0) for el in peaks]
p('___fitted_peaks_for_ki=%.3f___' % ki1)
p('peak dval measured fitpos delta')
for i, el in enumerate(peaks):
p('%-10s %7.3f %7.3f %7.3f %7.3f%s' % (
dhkls[abs(el[0])], el[0], el[1], peaks_fit[i],
peaks_fit[i] - el[1],
'' if abs(peaks_fit[i] - el[1]) < 0.10 else " **"))
p('')
rms += sum((pobs - pfit)**2 for (pobs, pfit) in
zip([el[1] for el in peaks], peaks_fit)) / len(peaks)
out.extend(restxt)
session.log.debug('')
session.log.debug('-' * 80)
session.log.debug('result from run with j=%d (RMS = %g):', j, rms)
for line in out:
session.log.debug(line)
if rms < bestrms:
beststt0s = stt0s
bestmtt0s = mtt0s
bestrms = rms
bestlines = out
session.log.debug('')
session.log.debug('*** new best result: RMS = %g', rms)
if not beststt0s:
session.log.warning('No successful fit results!')
if ki is not None:
session.log.warning('Is the initial guess for ki too far off?')
return
for line in bestlines:
session.log.info(line)
meanstt0 = sum(beststt0s)/len(beststt0s)
meanmtt0 = sum(bestmtt0s)/len(bestmtt0s)
session.log.info('Check errors (dki, dstt0)! RMS = %.3g', bestrms)
session.log.info('')
session.log.info('Adjust using:')
# TODO: fix suggestions using adjust()
session.log.info('mtt.offset += %.4f', meanmtt0)
session.log.info('mth.offset += %.4f', meanmtt0 / 2)
session.log.info('stt.offset += %.4f', meanstt0)
return CommandLineFitResult((meanmtt0, meanstt0))
|
ae2bfddd0ab95924ec3d56a1f86cc4aa9a685c9e
| 3,643,145
|
def get_neighbor_v6_by_ids(obj_ids):
"""Return NeighborV6 list by ids.
Args:
obj_ids: List of Ids of NeighborV6's.
"""
ids = list()
for obj_id in obj_ids:
try:
obj = get_neighbor_v6_by_id(obj_id).id
ids.append(obj)
except exceptions.NeighborV6DoesNotExistException as e:
raise api_rest_exceptions.ObjectDoesNotExistException(str(e))
except Exception as e:
raise api_rest_exceptions.NetworkAPIException(str(e))
return NeighborV6.objects.filter(id__in=ids)
|
a51d618961fa3e60c0c464473838791d55ba1f6a
| 3,643,146
|
import base64
from typing import Optional, Tuple
import cv2
import numpy as np
def decode_b64_to_image(b64_str: str) -> Tuple[bool, Optional[np.ndarray]]:
    """Decode a base64 string into an OpenCV image; intended for three-channel colour image encodings.
    :param b64_str: base64 string
    :return: ok, cv2_image
    """
if "," in b64_str:
b64_str = b64_str.partition(",")[-1]
else:
b64_str = b64_str
try:
img = base64.b64decode(b64_str)
return True, cv2.imdecode(np.frombuffer(img, dtype=np.int8), 1)
except cv2.error:
return False, None
|
66f0e7bb5028ad7247ef7cb468e904c2bc7afdb7
| 3,643,147
|
def _get_index_videos(course, pagination_conf=None):
"""
Returns the information about each video upload required for the video list
"""
course_id = str(course.id)
attrs = [
'edx_video_id', 'client_video_id', 'created', 'duration',
'status', 'courses', 'transcripts', 'transcription_status',
'error_description'
]
def _get_values(video):
"""
Get data for predefined video attributes.
"""
values = {}
for attr in attrs:
if attr == 'courses':
course = [c for c in video['courses'] if course_id in c]
(__, values['course_video_image_url']), = list(course[0].items())
else:
values[attr] = video[attr]
return values
videos, pagination_context = _get_videos(course, pagination_conf)
return [_get_values(video) for video in videos], pagination_context
|
5a3288ff8c2f371505fe2c6a3051992bfcc602eb
| 3,643,148
|
def get_user_by_api_key(api_key, active_only=False):
"""
Get a User object by api_key, whose attributes match those in the database.
:param api_key: API key to query by
:param active_only: Set this flag to True to only query for active users
:return: User object for that user ID
:raises UserDoesNotExistException: If no user exists with the given user_id
"""
if active_only:
user = models.User.query.filter_by(api_key=api_key, is_active=True).first()
else:
user = models.User.query.filter_by(api_key=api_key).first()
if not user:
raise UserDoesNotExistException('No user with api_key {api_key} exists'.format(api_key=api_key))
return user
|
b36373dbfcda80f6aac963153a66b54bce1d828d
| 3,643,149
|
def get_pixel_values_of_line(img, x0, y0, xf, yf):
"""
get the value of a line of pixels.
the line defined by the user using the corresponding first and last
pixel indices.
Parameters
----------
img : np.array.
image on a 2d np.array format.
    x0 : int
        row number of the starting pixel.
    y0 : int
        column number of the starting pixel.
    xf : int
        row number of the ending pixel.
    yf : int
        column number of the ending pixel.
Returns
-------
line_pixel_values : np.array
1d np.array representing the values of the chosen line of pixels.
"""
rr, cc = np.array(draw.line(x0, y0, xf, yf))
# line_pixel_values = [img[rr[i], cc[i]] for i in range(len(rr))]
line_pixel_values = img[rr, cc]
return line_pixel_values
|
ea78efe02130302b34ba8402f21349035b05b2e0
| 3,643,150
|
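A small usage sketch for get_pixel_values_of_line above, assuming scikit-image (skimage.draw) and NumPy are imported in the defining module and the function is in scope.

import numpy as np

img = np.arange(25).reshape(5, 5)  # hypothetical 5x5 ramp image
print(get_pixel_values_of_line(img, 0, 0, 4, 4))  # [ 0  6 12 18 24]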
def _filter_out_variables_not_in_dataframe(X, variables):
"""Filter out variables that are not present in the dataframe.
Function removes variables that the user defines in the argument `variables`
but that are not present in the input dataframe.
    Useful when using several feature selection procedures in a row. The dataframe
    input to the first selection algorithm likely contains more variables than the
    input dataframe to subsequent selection algorithms, and it is not possible, a
    priori, to say which variables will be dropped.
Parameters
----------
X: pandas DataFrame
variables: string, int or list of (strings or int).
Returns
-------
filtered_variables: List of variables present in `variables` and in the
input dataframe.
"""
# When variables is not defined, keep it like this and return None.
if variables is None:
return None
# If an integer or a string is provided, convert to a list.
if not isinstance(variables, list):
variables = [variables]
# Filter out elements of variables that are not in the dataframe.
filtered_variables = [var for var in variables if var in X.columns]
# Raise an error if no column is left to work with.
if len(filtered_variables) == 0:
raise ValueError(
"After filtering no variable remaining. At least 1 is required."
)
return filtered_variables
|
63b4cce75741a5d246f40c5b88cfebaf818b3482
| 3,643,151
|
import gzip
def file_format(input_files):
"""
    Takes all input files and checks their first character to assess
    the file format. Three lists are returned: one containing all FASTA files,
    one containing all FASTQ files, and one containing all invalid files.
"""
fasta_files = []
fastq_files = []
invalid_files = []
# Open all input files and get the first character
for infile in input_files:
try:
f = gzip.open(infile, "rb")
fst_char = f.read(1)
except OSError:
f = open(infile, "rb")
fst_char = f.read(1)
f.close()
#fst_char = f.readline().decode("ascii")[0]
#print(fst_char)
# Return file format based in first char
if fst_char == b'@':
fastq_files.append(infile)
elif fst_char == b'>':
fasta_files.append(infile)
else:
invalid_files.append(infile)
return (fasta_files, fastq_files, invalid_files)
|
acd9a0f7b49884d611d0ac65b43407a323a6588b
| 3,643,152
|
def sub_vector(v1: Vector3D, v2: Vector3D) -> Vector3D:
"""Substract vector V1 from vector V2 and return resulting Vector.
Keyword arguments:
v1 -- Vector 1
v2 -- Vector 2
"""
return [v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2]]
|
2c9878d6775fdcee554f959e392b2c8d2bad8c8e
| 3,643,153
|
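A couple of example calls for sub_vector above, assuming it (and whatever 3-element sequence Vector3D aliases) is in scope.

print(sub_vector([3.0, 2.0, 1.0], [1.0, 1.0, 1.0]))  # [2.0, 1.0, 0.0]
print(sub_vector([0, 0, 0], [1, 2, 3]))              # [-1, -2, -3]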
def create_validity_dict(validity_period):
"""Convert a validity period string into a dict for issue_certificate().
Args:
validity_period (str): How long the signed certificate should be valid for
Returns:
dict: A dict {"Value": number, "Type": "string" } representation of the
validity period
"""
validity_suffix = validity_period[-1:]
if validity_suffix == "d":
validity_unit = "DAYS"
elif validity_suffix == "m":
validity_unit = "MONTHS"
elif validity_suffix == "y":
validity_unit = "YEARS"
return {"Value": int(validity_period[:-1]), "Type": validity_unit}
|
ba0ccdd5c009a930b4030b15fbafaa978fe753d4
| 3,643,154
|
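Example conversions for create_validity_dict above, assuming it is in scope; only the 'd', 'm' and 'y' suffixes are recognised.

print(create_validity_dict("365d"))  # {'Value': 365, 'Type': 'DAYS'}
print(create_validity_dict("13m"))   # {'Value': 13, 'Type': 'MONTHS'}
print(create_validity_dict("2y"))    # {'Value': 2, 'Type': 'YEARS'}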
def analyse_latency(cid):
"""
Parse the resolve_time and download_time info from cid_latency.txt
:param cid: cid of the object
:return: time to resolve the source of the content and time to download the content
"""
resolve_time = 0
download_time = 0
with open(f'{cid}_latency.txt', 'r') as stdin:
for line in stdin.readlines():
"""
The output of the ipfs get <cid> command is in the form of:
Started: 02-19-2022 01:51:16
Resolve Ended: 02-19-2022 01:51:16
Resolve Duraution: 0.049049
Download Ended: 02-19-2022 01:51:16
Download Duraution: 0.006891
Total Duraution: 0.055940
"""
if "Resolve Duraution:" in line:
resolve_time = line.split(": ")[1]
resolve_time = resolve_time.split("\n")[0]
if "Download Duraution:" in line:
download_time = line.split(": ")[1]
download_time = download_time.split("\n")[0]
return resolve_time, download_time
|
806a9969cc934faeea842901442ecececfdde232
| 3,643,155
|
import re
def process_ref(paper_id):
"""Attempt to extract arxiv id from a string"""
# if user entered a whole url, extract only the arxiv id part
paper_id = re.sub("https?://arxiv\.org/(abs|pdf|ps)/", "", paper_id)
paper_id = re.sub("\.pdf$", "", paper_id)
# strip version
paper_id = re.sub("v[0-9]+$", "", paper_id)
# remove leading arxiv, i.e., such that paper_id=' arXiv: 2001.1234' is still valid
paper_id = re.sub("^\s*arxiv[:\- ]", "", paper_id, flags=re.IGNORECASE)
return paper_id
|
a1c817f1ae7b211973efd6c201b5c13e1a91b57b
| 3,643,156
|
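A couple of inputs that process_ref above normalises to a bare arXiv id, assuming the function is in scope.

print(process_ref("https://arxiv.org/abs/2001.01234v2"))    # 2001.01234
print(process_ref("https://arxiv.org/pdf/2001.01234.pdf"))  # 2001.01234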
import re
def augment_test_func(test_func):
"""Augment test function to parse log files.
`tools.create_tests` creates functions that run an LBANN
experiment. This function creates augmented functions that parse
the log files after LBANN finishes running, e.g. to check metrics
or runtimes.
Note: The naive approach is to define the augmented test functions
in a loop. However, Python closures are late binding. In other
words, the function would be overwritten every time we define it.
We get around this overwriting problem by defining the augmented
function in the local scope of another function.
Args:
test_func (function): Test function created by
`tools.create_tests`.
Returns:
function: Test that can interact with PyTest.
"""
test_name = test_func.__name__
# Define test function
def func(cluster, dirname):
# Run LBANN experiment
experiment_output = test_func(cluster, dirname)
# Parse LBANN log file
train_accuracy = None
gpu_usage = None
mini_batch_times = []
gpu_usages = []
with open(experiment_output['stdout_log_file']) as f:
for line in f:
match = re.search('training epoch [0-9]+ objective function : ([0-9.]+)', line)
if match:
train_accuracy = float(match.group(1))
match = re.search('training epoch [0-9]+ mini-batch time statistics : ([0-9.]+)s mean', line)
if match:
mini_batch_times.append(float(match.group(1)))
match = re.search('GPU memory usage statistics : ([0-9.]+) GiB mean', line)
if match:
gpu_usages.append(float(match.group(1)))
# Check if training accuracy is within expected range
        assert (expected_accuracy_range[0]
                < train_accuracy
                < expected_accuracy_range[1]), \
            'train accuracy is outside expected range'
        # Only tested on Ray; skip the mini-batch check on other clusters until
        # expected mini-batch values are available for them.
# Check if mini-batch time is within expected range
# Note: Skip first epoch since its runtime is usually an outlier
mini_batch_times = mini_batch_times[1:]
mini_batch_time = sum(mini_batch_times) / len(mini_batch_times)
assert (0.75 * expected_mini_batch_times[cluster]
< mini_batch_time
< 1.25 * expected_mini_batch_times[cluster]), \
'average mini-batch time is outside expected range'
# Check for GPU usage and memory leaks
# Note: Skip first epoch
gpu_usages = gpu_usages[1:]
gpu_usage = sum(gpu_usages)/len(gpu_usages)
assert (0.75 * expected_gpu_usage[cluster]
< gpu_usage
< 1.25 * expected_gpu_usage[cluster]),\
'average gpu usage is outside expected range'
# Return test function from factory function
func.__name__ = test_name
return func
|
081593b57dfc82df328617b22cf778fceffe4beb
| 3,643,157
|
def get_cuda_arch_flags(cflags):
"""
For an arch, say "6.1", the added compile flag will be
``-gencode=arch=compute_61,code=sm_61``.
For an added "+PTX", an additional
``-gencode=arch=compute_xx,code=compute_xx`` is added.
"""
# TODO(Aurelius84):
return []
|
dfb92aa00db7f5d515b7b296824f6bdd91fa2724
| 3,643,158
|
def nstep_td(env, pi, alpha=1, gamma=1, n=1, N_episodes=1000,
ep_max_length=1000):
"""Evaluates state-value function with n-step TD
Based on Sutton/Barto, Reinforcement Learning, 2nd ed. p. 144
Args:
env: Environment
pi: Policy
alpha: Step size
gamma: Discount factor
n: Number of steps
N_episodes: Run this many episodes
ep_max_length: Force termination of episode after this number of steps
Returns:
v: State-value function
"""
v = defaultdict(lambda: 0)
for i_episode in range(N_episodes):
print("\r> N-step TD: Episode {}/{}".format(
i_episode+1, N_episodes), end="")
state = env.reset()
rewards = [0]
states = []
t = 0
T = np.inf
done = False
while t < T and t < ep_max_length:
if not done:
action = select_action_policy(pi, state)
state_new, reward, done, info = env.step(action)
rewards.append(reward)
states.append(state)
state = state_new
if done:
T = t+n+1
if t-n >= 0:
G = 0
for i in range(min(n,T-t)):
G += gamma**i * rewards[t-n+1+i]
if t < T-n:
G += gamma**n * v[states[t]]
v[states[t-n]] += alpha*(G - v[states[t-n]])
t += 1
print()
return v
|
86d5ab58d4d185dcbf08a84bbc8eb67051d2af21
| 3,643,159
|
def open_file(path):
"""more robust open function"""
return open(path, encoding='utf-8')
|
785ab196756365d1f27ce3fcd69d0ba2867887a9
| 3,643,160
|
def test_subscribe(env):
"""Check async. interrupt if a process terminates."""
def child(env):
yield env.timeout(3)
return 'ohai'
def parent(env):
child_proc = env.process(child(env))
subscribe_at(child_proc)
try:
yield env.event()
except Interrupt as interrupt:
assert interrupt.cause[0] is child_proc
assert interrupt.cause[1] == 'ohai'
assert env.now == 3
env.process(parent(env))
env.run()
|
fa3170cc6167e92195587f06ae65b27da48fa8ff
| 3,643,161
|
import tempfile
import os
import setuptools
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
fd, fname = tempfile.mkstemp('.cpp', 'main', text=True)
with os.fdopen(fd, 'w') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([fname], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
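
# Hedged usage sketch (not part of the original snippet): probe the default
# compiler for a flag. new_compiler()/customize_compiler() come from distutils;
# '-std=c++14' is just an illustrative flag.
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler

_compiler = new_compiler()
customize_compiler(_compiler)
print(has_flag(_compiler, '-std=c++14'))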
|
b407af028221187c683e2821c507c10ec218ea86
| 3,643,162
|
import gzip
def _parse_data(f, dtype, shape):
"""Parses the data."""
dtype_big = np.dtype(dtype).newbyteorder(">")
count = np.prod(np.array(shape))
# See: https://github.com/numpy/numpy/issues/13470
use_buffer = type(f) == gzip.GzipFile
if use_buffer:
data = np.frombuffer(f.read(), dtype_big, count)
else:
data = np.fromfile(f, dtype_big, count)
return data.astype(dtype).reshape(shape)
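
# Hedged usage sketch (not part of the original snippet): round-trip a small
# big-endian uint32 payload through a temporary gzip file and parse it back.
import tempfile
import numpy as np

_payload = np.arange(6, dtype=np.dtype(">u4")).tobytes()
with tempfile.NamedTemporaryFile(suffix=".gz", delete=False) as _tmp:
    with gzip.GzipFile(fileobj=_tmp, mode="wb") as _gz:
        _gz.write(_payload)
    _path = _tmp.name
with gzip.open(_path, "rb") as _f:
    print(_parse_data(_f, "uint32", (2, 3)))  # -> [[0 1 2] [3 4 5]]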
|
42185d2425aa9aa14abc0a61a5bdabc95224d15c
| 3,643,163
|
def test_target(target # type: Any
):
"""
A simple decorator to declare that a case function is associated with a particular target.
>>> @test_target(int)
>>> def case_to_test_int():
>>> ...
    This is an alias for `@case_tags(target)` that some users may find a bit more readable.
:param target: for example a function, a class... or a string representing a function, a class...
:return:
"""
return case_tags(target)
|
ebbf94941e7b11224ee4c8ee9665cea231076f5d
| 3,643,164
|
def plot_spatial(adata, color, img_key="hires", show_img=True, **kwargs):
"""Plot spatial abundance of cell types (regulatory programmes) with colour gradient
and interpolation (from Visium anndata).
This method supports only 7 cell types with these colours (in order, which can be changed using reorder_cmap).
'yellow' 'orange' 'blue' 'green' 'purple' 'grey' 'white'
:param adata: adata object with spatial coordinates in adata.obsm['spatial']
:param color: list of adata.obs column names to be plotted
:param kwargs: arguments to plot_spatial_general
:return: matplotlib figure
"""
if show_img is True:
kwargs["show_img"] = True
kwargs["img"] = list(adata.uns["spatial"].values())[0]["images"][img_key]
# location coordinates
if "spatial" in adata.uns.keys():
kwargs["coords"] = (
adata.obsm["spatial"] * list(adata.uns["spatial"].values())[0]["scalefactors"][f"tissue_{img_key}_scalef"]
)
else:
kwargs["coords"] = adata.obsm["spatial"]
fig = plot_spatial_general(value_df=adata.obs[color], **kwargs) # cell abundance values
return fig
|
ffbf3cc0f6efdef9bf66b94bac22ef8bf8b39bab
| 3,643,165
|
def stats_by_group(df):
"""Calculate statistics from a groupby'ed dataframe with TPs,FPs and FNs."""
EPSILON = 1e-10
result = df[['tp', 'fp', 'fn']].sum().reset_index().assign(
precision=lambda x: (x['tp'] + EPSILON) /
(x['tp'] + x['fp'] + EPSILON),
recall=lambda x: (x['tp'] + EPSILON) /
(x['tp'] + x['fn'] + EPSILON)).assign(
f1=lambda x: 2 * x['precision'] * x['recall'] /
(x['precision'] + x['recall'] + EPSILON),
count=lambda x: x['tp'] + x['fn'])
result['proportion'] = result['count'] / np.sum(result['count'])
result['proportion_text'] = (result['proportion'] *
100).round(2).astype(str) + "%"
return result
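
# Hedged usage sketch (not part of the original snippet): per-class
# precision/recall/F1 from a frame that already holds tp/fp/fn counts.
# The 'label' column and its values are purely illustrative.
import numpy as np
import pandas as pd

_df = pd.DataFrame({
    "label": ["cat", "cat", "dog"],
    "tp": [8, 2, 5],
    "fp": [1, 0, 2],
    "fn": [1, 1, 3],
})
print(stats_by_group(_df.groupby("label")))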
|
c137e4076f837f51b0cab1acbe842ff827b62ee8
| 3,643,166
|
def uncolorize(text):
""" Attempts to remove color and reset flags from text via regex pattern
@text: #str text to uncolorize
-> #str uncolorized @text
..
from redis_structures.debug import uncolorize
uncolorize('\x1b[0;34mHello world\x1b[1;m')
# -> 'Hello world'
..
"""
return _find_colors.sub("", text)
|
2bc011d755412ac1b9ca0b7e24afc3ed14dbe7ba
| 3,643,167
|
def in_this_prow(prow):
"""
Returns a bool describing whether this processor inhabits `prow`.
Args:
prow: The prow.
Returns:
The bool.
"""
return prow == my_prow()
|
0f159cc9b57f407cbfefe9892689664f6d902f94
| 3,643,168
|
def _keypair_from_file(key_pair_file: str) -> Keypair:
"""Returns a Solana KeyPair from a file"""
with open(key_pair_file) as kpf:
keypair = kpf.read()
keypair = keypair.replace("[", "").replace("]", "")
keypair = list(keypair.split(","))
keypair = [int(i) for i in keypair]
return Keypair(keypair[:32])
|
1fdb4d72945d89db7c8d26c96bcbbd18071258dc
| 3,643,169
|
import binascii
def val_to_bitarray(val, doing):
"""Convert a value into a bitarray"""
if val is sb.NotSpecified:
val = b""
if type(val) is bitarray:
return val
if type(val) is str:
val = binascii.unhexlify(val.encode())
if type(val) is not bytes:
raise BadConversion("Couldn't get bitarray from a value", value=val, doing=doing)
b = bitarray(endian="little")
b.frombytes(val)
return b
|
17081bb8b382763fa5ace4d7d2969b6eed4581ed
| 3,643,170
|
def unpack_uint64_from(buf, offset=0):
"""Unpack a 64-bit unsigned integer from *buf* at *offset*."""
return _uint64struct.unpack_from(buf, offset)[0]
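
# Hedged usage sketch (not part of the original snippet): _uint64struct is
# assumed to be struct.Struct("<Q") (little-endian); flip the byte order below
# if the surrounding module defines it differently.
import struct

_buf = struct.pack("<Q", 2**40 + 7)
print(unpack_uint64_from(_buf))  # 1099511627783 under the little-endian assumption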
|
ce01d76d18e45a42687d997459da9113d9e3e45f
| 3,643,171
|
def del_none(dictionary):
"""
Recursively delete from the dictionary all entries which values are None.
Args:
dictionary (dict): input dictionary
Returns:
dict: output dictionary
Note:
This function changes the input parameter in place.
"""
for key, value in list(dictionary.items()):
if value is None:
del dictionary[key]
elif isinstance(value, dict):
del_none(value)
return dictionary
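
# Hedged usage sketch (not part of the original snippet): nested None values
# are stripped in place; the keys shown are purely illustrative.
_cfg = {"host": "localhost", "port": None, "tls": {"cert": None, "verify": True}}
print(del_none(_cfg))  # {'host': 'localhost', 'tls': {'verify': True}}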
|
48b76272ed20bbee38b5293ede9f5d824950aec5
| 3,643,172
|
from sys import path
import tempfile
def get_model(model_dir, suffix=""):
"""return model file, model spec object, and list of extra data items
this function will get the model file, metadata, and extra data
the returned model file is always local, when using remote urls
(such as v3io://, s3://, store://, ..) it will be copied locally.
returned extra data dict (of key, DataItem objects) allow reading additional model files/objects
e.g. use DataItem.get() or .download(target) .as_df() to read
example::
model_file, model_artifact, extra_data = get_model(models_path, suffix='.pkl')
model = load(open(model_file, "rb"))
categories = extra_data['categories'].as_df()
:param model_dir: model dir or artifact path (store://..) or DataItem
:param suffix: model filename suffix (when using a dir)
:returns: model filename, model artifact object, extra data dict
"""
model_file = ""
model_spec = None
extra_dataitems = {}
suffix = suffix or ".pkl"
if hasattr(model_dir, "artifact_url"):
model_dir = model_dir.artifact_url
if is_store_uri(model_dir):
model_spec, target = store_manager.get_store_artifact(model_dir)
if not model_spec or model_spec.kind != "model":
raise ValueError(f"store artifact ({model_dir}) is not model kind")
model_file = _get_file_path(target, model_spec.model_file)
extra_dataitems = _get_extra(target, model_spec.extra_data)
elif model_dir.lower().endswith(".yaml"):
model_spec = _load_model_spec(model_dir)
model_file = _get_file_path(model_dir, model_spec.model_file)
extra_dataitems = _get_extra(model_dir, model_spec.extra_data)
elif model_dir.endswith(suffix):
model_file = model_dir
else:
dirobj = store_manager.object(url=model_dir)
model_dir_list = dirobj.listdir()
if model_spec_filename in model_dir_list:
model_spec = _load_model_spec(path.join(model_dir, model_spec_filename))
model_file = _get_file_path(model_dir, model_spec.model_file, isdir=True)
extra_dataitems = _get_extra(model_dir, model_spec.extra_data, is_dir=True)
else:
extra_dataitems = _get_extra(
model_dir, {v: v for v in model_dir_list}, is_dir=True
)
for file in model_dir_list:
if file.endswith(suffix):
model_file = path.join(model_dir, file)
break
if not model_file:
raise ValueError(f"cant resolve model file for {model_dir} suffix{suffix}")
obj = store_manager.object(url=model_file)
if obj.kind == "file":
return model_file, model_spec, extra_dataitems
temp_path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False).name
obj.download(temp_path)
return temp_path, model_spec, extra_dataitems
|
121be1b8a0100db8b41a2c4aa66f57021bf54562
| 3,643,173
|
def get_table_header(driver):
"""Return Table columns in list form """
header = driver.find_elements(By.TAG_NAME, value= 'th')
header_list = [item.text for index, item in enumerate(header) if index < 10]
return header_list
|
631e71e357beb37f50defe16fe894f5be3356516
| 3,643,174
|
from rx.core.operators.connectable.refcount import _ref_count
from typing import Callable
def ref_count() -> Callable[[ConnectableObservable], Observable]:
"""Returns an observable sequence that stays connected to the
source as long as there is at least one subscription to the
observable sequence.
"""
return _ref_count()
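
# Hedged usage sketch (not part of the original snippet), assuming the RxPY 3
# API: publish() turns the source into a ConnectableObservable, and ref_count()
# keeps it connected while at least one observer is subscribed.
import rx
from rx import operators as ops

shared = rx.interval(1.0).pipe(ops.publish(), ref_count())
subscription = shared.subscribe(print)   # first subscriber triggers connect
subscription.dispose()                   # last disposal disconnects the source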
|
e6f8b21e582d46fab75d9013121d764072630390
| 3,643,175
|
import logging
def no_dry_run(f):
"""A decorator which "disables" a function during a dry run.
A can specify a `dry_run` option in the `devel` section of `haas.cfg`.
If the option is present (regardless of its value), any function or
method decorated with `no_dry_run` will be "disabled." The call will
be logged (with level `logging.DEBUG`), but will not actually execute.
The function will instead return 'None'. Callers of decorated functions
must accept a None value gracefully.
The intended use case of `no_dry_run` is to disable functions which
cannot be run because, for example, the HaaS is executing on a
developer's workstation, which has no configured switch, libvirt, etc.
If the `dry_run` option is not specified, this decorator has no effect.
"""
@wraps(f)
def wrapper(*args, **kwargs):
if have_dry_run():
logger = logging.getLogger(__name__)
logger.info('dry run, not executing: %s.%s(*%r,**%r)' %
(f.__module__, f.__name__, args, kwargs))
return None
else:
return f(*args, **kwargs)
return wrapper
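
# Hedged usage sketch (not part of the original snippet): `reboot_node` and its
# argument are hypothetical, and the surrounding module is assumed to provide
# `wraps` and `have_dry_run`. Callers must tolerate the None return produced
# during a dry run.
@no_dry_run
def reboot_node(node_label):
    """Power-cycle a node via its out-of-band management interface."""
    return 'rebooted {}'.format(node_label)

result = reboot_node('node-07')  # None when dry_run is configured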
|
ba32ce4885e9b55aa858a14e963de414ed9f170f
| 3,643,176
|
def radius_provider_modify(handle, name, **kwargs):
"""
modifies a radius provider
Args:
handle (UcsHandle)
name (string): radius provider name
**kwargs: key-value pair of managed object(MO) property and value, Use
'print(ucscoreutils.get_meta_info(<classid>).config_props)'
to get all configurable properties of class
Returns:
AaaRadiusProvider: managed object
Raises:
UcsOperationError: if AaaRadiusProvider is not present
Example:
radius_provider_modify(handle, name="test_radius_prov", timeout="5")
"""
mo = radius_provider_get(handle, name, caller="radius_provider_modify")
mo.set_prop_multiple(**kwargs)
handle.set_mo(mo)
handle.commit()
return mo
|
9a5c5d62ff60a3a3a8499e4aaa944f758dc49f83
| 3,643,177
|
def _read_table(table_node):
"""Return a TableData object for the 'table' element."""
header = []
rows = []
for node in table_node:
if node.tag == "th":
if header:
raise ValueError("cannot handle multiple headers")
elif rows:
raise ValueError("encountered header after rows")
else:
header = node.text.strip()
elif node.tag == "tr":
rows.append(node.text.strip())
return create_table(header, rows)
|
e6ef6e5d5ec99ea2b15ddfcae61b4dd817f8232b
| 3,643,178
|
def postorder(root: Node):
"""
Post-order traversal visits left subtree, right subtree, root node.
>>> postorder(make_tree())
[4, 5, 2, 3, 1]
"""
return postorder(root.left) + postorder(root.right) + [root.data] if root else []
|
ddeaa6e0f2f466284d69908dfc7eb67bdc6748c8
| 3,643,179
|
def merge_triangulations(groups):
"""
Each entry of the groups list is a list of two (or one) triangulations.
This function takes each pair of triangulations and combines them.
Parameters
----------
groups : list
List of pairs of triangulations
Returns
-------
list
List of merged triangulations
"""
triangulations = []
for group in groups:
if len(group)==2:
# Find the first edges to connect the seperate triangulations
ldi, rdi = lowest_common_tangent(group[0], group[1])
# Combine the two hulls into a single set of edges
base, d_triang = combine_triangulations(ldi, rdi, group[0], group[1])
# Given the starting base edge, fill in the edges between the hulls
d_triang = zip_hulls(base, d_triang)
triangulations.append(d_triang)
else:
triangulations.append(group[0])
return [triangulations[i:i+2] for i in range(0, len(triangulations), 2)]
|
0d39006892e0b248e1f50a62c86911c830b100ce
| 3,643,180
|
from lxml import etree as et
from rasterio.crs import CRS
from rasterio.transform import Affine
def make_vrt_list(feat_list, band=None):
"""
    Take a list of STAC features and band name(s) and build GDAL-friendly
    VRT XML strings, returned as a list.
    band : list, str
        Name(s) of the required band(s), given as a string or a list of strings.
"""
# imports
# check if band provided, if so and is str, make list
if band is None:
bands = []
elif not isinstance(band, list):
bands = [band]
else:
bands = band
# check features type, length
if not isinstance(feat_list, list):
raise TypeError('Features must be a list of xml objects.')
elif not len(feat_list) > 0:
raise ValueError('No features provided.')
# set list vrt of each scene
vrt_list = []
# iter stac scenes, build a vrt
for feat in feat_list:
# get scene identity and properties
f_id = feat.get('id')
f_props = feat.get('properties')
# get scene-level date
f_dt = f_props.get('datetime')
# get scene-level x, y parameters
f_x_size = f_props.get('proj:shape')[1]
f_y_size = f_props.get('proj:shape')[0]
# get scene-level epsg src as wkt
f_srs = CRS.from_epsg(f_props.get('proj:epsg'))
f_srs = f_srs.wkt
#from osgeo.osr import SpatialReference
#osr_crs = SpatialReference()
#osr_crs.ImportFromEPSG(f_props.get('proj:epsg'))
#f_srs = osr_crs.ExportToWkt()
# get scene-level transform
#from affine import Affine
aff = Affine(*f_props.get('proj:transform')[0:6])
f_transform = ', '.join(str(p) for p in Affine.to_gdal(aff))
# build a top-level vrt dataset xml object
xml_ds = satfetcher.make_vrt_dataset_xml(x_size=f_x_size,
y_size=f_y_size,
axis_map='1,2', # hardcoded
srs=f_srs,
trans=f_transform)
# iterate bands and build raster vrts
band_idx = 1
for band in bands:
if band in feat.get('assets'):
# get asset
asset = feat.get('assets').get(band)
# set dtype to int16... todo bug in rasterio with int8?
#a_dtype = 'UInt8' if band == 'oa_fmask' else 'Int16'
a_dtype = 'Int16'
# get asset raster x, y sizes
a_x_size = asset.get('proj:shape')[1]
a_y_size = asset.get('proj:shape')[0]
# get raster url, replace s3 with https
a_url = asset.get('href')
a_url = a_url.replace('s3://dea-public-data', 'https://data.dea.ga.gov.au')
# get nodata value
a_nodata = 0 if band == 'oa_fmask' else -999
# build raster xml
xml_rast = satfetcher.make_vrt_raster_xml(x_size=a_x_size,
y_size=a_y_size,
dtype=a_dtype,
band_num=band_idx,
nodata=a_nodata,
dt=f_dt,
rel_to_vrt=0, # hardcoded
url=a_url,
src_band=1) # hardcoded
# append raster xml to vrt dataset xml
xml_ds.append(xml_rast)
# increase band index
band_idx += 1
# decode to utf-8 string and append to vrt list
xml_ds = et.tostring(xml_ds).decode('utf-8')
vrt_list.append(xml_ds)
return vrt_list
|
83b2e8143bb63e152e569ab28af896509fa18a9f
| 3,643,181
|
def compute_relative_pose(cam_pose, ref_pose):
"""Compute relative pose between two cameras
Args:
cam_pose (np.ndarray): Extrinsic matrix of camera of interest C_i (3,4).
Transforms points in world frame to camera frame, i.e.
x_i = C_i @ x_w (taking into account homogeneous dimensions)
ref_pose (np.ndarray): Extrinsic matrix of reference camera C_r (3,4)
Returns:
relative_pose (np.ndarray): Relative pose of size (3,4). Should transform
points in C_r to C_i, i.e. x_i = M @ x_r
Prohibited functions:
Do NOT use np.linalg.inv() or similar functions
"""
relative_pose = np.zeros((3, 4), dtype=np.float64)
""" YOUR CODE STARTS HERE """
Ri, Rr = cam_pose[:, :-1], ref_pose[:, :-1]
ti, tr = cam_pose[:, -1:], ref_pose[:, -1:]
relative_pose[:, :-1] = Ri @ Rr.T
relative_pose[:, -1:] = ti - Ri @ Rr.T @ tr
""" YOUR CODE ENDS HERE """
return relative_pose
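
# Hedged sanity check (not part of the original snippet): when both cameras
# share the same extrinsics, the relative pose is the identity rotation with
# zero translation.
import numpy as np

_pose = np.hstack([np.eye(3), np.array([[1.0], [2.0], [3.0]])])
_rel = compute_relative_pose(_pose, _pose)
print(np.allclose(_rel, np.hstack([np.eye(3), np.zeros((3, 1))])))  # True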
|
b185554b2961bd7cd70df5df714c176f2d5b6dcc
| 3,643,182
|
import sys
def compat_chr(item):
"""
This is necessary to maintain compatibility across Python 2.7 and 3.6.
In 3.6, 'chr' handles any unicode character, whereas in 2.7, `chr` only handles
ASCII characters. Thankfully, the Python 2.7 method `unichr` provides the same
functionality as 3.6 `chr`.
    :param item: an integer code point whose character representation is needed
    :return: the length-1 unicode string corresponding to the code point `item`
"""
if sys.version >= '3.0':
return chr(item)
else:
return unichr(item)
|
a5d45158cf0b48fd8863ac6356632080890fceee
| 3,643,183
|
def cli_cosmosdb_cassandra_table_update(client,
resource_group_name,
account_name,
keyspace_name,
table_name,
default_ttl=None,
schema=None,
analytical_storage_ttl=None):
"""Update an Azure Cosmos DB Cassandra table"""
logger.debug('reading Cassandra table')
cassandra_table = client.get_cassandra_table(resource_group_name, account_name, keyspace_name, table_name)
cassandra_table_resource = CassandraTableResource(id=table_name)
cassandra_table_resource.default_ttl = cassandra_table.resource.default_ttl
cassandra_table_resource.schema = cassandra_table.resource.schema
cassandra_table_resource.analytical_storage_ttl = cassandra_table.resource.analytical_storage_ttl
if _populate_cassandra_table_definition(cassandra_table_resource, default_ttl, schema, analytical_storage_ttl):
logger.debug('replacing Cassandra table')
cassandra_table_create_update_resource = CassandraTableCreateUpdateParameters(
resource=cassandra_table_resource,
options={})
return client.create_update_cassandra_table(resource_group_name,
account_name,
keyspace_name,
table_name,
cassandra_table_create_update_resource)
|
d1629aa39d9573f0cb0e99a10fc635679c15abdc
| 3,643,184
|
def determine_clim_by_standard_deviation(color_data, n_std_dev=2.5):
"""Automatically determine color limits based on number of standard
deviations from the mean of the color data (color_data). Useful if there
are outliers in the data causing difficulties in distinguishing most of
the data. Outputs vmin and vmax which can be passed to plotting routine
or plt.clim().
"""
color_data_mean = np.nanmean(color_data)
color_data_std = np.nanstd(color_data)
vmin = color_data_mean - n_std_dev * color_data_std
vmax = color_data_mean + n_std_dev * color_data_std
return vmin, vmax
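
# Hedged usage sketch (not part of the original snippet): clamp a colour scale
# to +/- 2.5 standard deviations so a few outliers do not wash out the plot.
import numpy as np
import matplotlib.pyplot as plt

_values = np.random.default_rng(0).normal(size=1000)
_values[:5] = 50.0  # inject outliers
vmin, vmax = determine_clim_by_standard_deviation(_values, n_std_dev=2.5)
plt.scatter(np.arange(_values.size), _values, c=_values)
plt.clim(vmin, vmax)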
|
1a8b1240c50a01f645862b7fce76bc93c62bcb26
| 3,643,185
|
def ec_double(point: ECPoint, alpha: int, p: int) -> ECPoint:
"""
Doubles a point on an elliptic curve with the equation y^2 = x^3 + alpha*x + beta mod p.
Assumes the point is given in affine form (x, y) and has y != 0.
"""
assert point[1] % p != 0
m = div_mod(3 * point[0] * point[0] + alpha, 2 * point[1], p)
x = (m * m - 2 * point[0]) % p
y = (m * (point[0] - x) - point[1]) % p
return x, y
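
# Hedged worked example (not part of the original snippet): div_mod is assumed
# to perform modular division a * b^-1 mod p; a minimal stand-in is sketched
# below (requires Python 3.8+ for pow(b, -1, p)).
try:
    div_mod
except NameError:  # stand-in only if the surrounding module does not provide one
    def div_mod(a, b, p):
        return (a * pow(b, -1, p)) % p

# On the toy curve y^2 = x^3 + x + 6 (mod 11), doubling (2, 7) yields (5, 2).
print(ec_double((2, 7), alpha=1, p=11))  # (5, 2)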
|
4489ef72ceb1297983c5f4ac4132fc1e04105365
| 3,643,186
|
def scaled_dot_product_attention(q, k, v, mask):
"""
Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Notice that mask must have the same dimensions as q, k, v.
e.g. if q, k, v are (batch_size, num_heads, seq_len, depth), then the mask
should be also (batch_size, num_heads, seq_len, depth).
However, if q, k, v are (batch_size, seq_len, depth), then the mask should
also not contain num_heads.
Returns:
output (a.k.a. context vectors), scaled_attention_logits
"""
# (..., seq_len_q, seq_len_k)
matmul_qk = tf.matmul(q, k, transpose_b=True)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# (..., seq_len_q, seq_len_k)
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
# (..., seq_len_q, depth_v)
output = tf.matmul(attention_weights, v)
return output, scaled_attention_logits
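
# Hedged usage sketch (not part of the original snippet): random
# (batch, heads, seq_len, depth) tensors with no mask.
import tensorflow as tf

q = tf.random.normal((2, 4, 8, 16))
k = tf.random.normal((2, 4, 8, 16))
v = tf.random.normal((2, 4, 8, 16))
context, logits = scaled_dot_product_attention(q, k, v, mask=None)
print(context.shape, logits.shape)  # (2, 4, 8, 16) (2, 4, 8, 8)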
|
22110522c4f33ec30c076240ade20f5b66cb3fcd
| 3,643,187
|
import re
def _parse_message(message):
"""Parses the message.
Splits the message into separators and tags. Tags are named tuples
representing the string ^^type:name:format^^ and they are separated by
separators. For example, in
"123^^node:Foo:${file}^^456^^node:Bar:${line}^^789", there are two tags and
three separators. The separators are the numeric characters.
Supported tags after node:<node_name>
file: Replaced with the filename in which the node was defined.
line: Replaced by the line number at which the node was defined.
Args:
message: String to parse
Returns:
(list of separator strings, list of _ParseTags).
For example, if message is "123^^node:Foo:${file}^^456" then this function
returns (["123", "456"], [_ParseTag("node", "Foo", "${file}")])
"""
seps = []
tags = []
pos = 0
while pos < len(message):
match = re.match(_INTERPOLATION_PATTERN, message[pos:])
if match:
seps.append(match.group(1))
tags.append(_ParseTag(match.group(3), match.group(4), match.group(5)))
pos += match.end()
else:
break
seps.append(message[pos:])
return seps, tags
|
c961f2a49a21682eb247d4138646abd86135c560
| 3,643,188
|
def model_fn(features, labels, mode, params):
"""Model function."""
del labels, params
encoder_module = hub.Module(FLAGS.retriever_module_path)
block_emb = encoder_module(
inputs=dict(
input_ids=features["block_ids"],
input_mask=features["block_mask"],
segment_ids=features["block_segment_ids"]),
signature="projected")
predictions = dict(block_emb=block_emb)
return tf.estimator.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)
|
ff40f74501f26e880a9cf1421a608240fd059fb8
| 3,643,189
|
def get_rotated_coords(vec, coords):
"""
Given the unit vector (in cartesian), 'vec', generates
the rotation matrix and rotates the given 'coords' to
align the z-axis along the unit vector, 'vec'
Args:
vec, coords - unit vector to rotate to, coordinates
Returns:
rot_coords: rotated coordinates
"""
rot = get_rotation_matrix(vec)
rot_coords = (rot @ coords.T).T
return rot_coords
|
f8504df4b7afef524e4147ce6303055a4b7a3cea
| 3,643,190
|
def merge_on_pids(all_pids, pdict, ddict):
"""
Helper function to merge dictionaries
all_pids: list of all patient ids
pdict, ddict: data dictionaries indexed by feature name
1) pdict[fname]: patient ids
2) ddict[fname]: data tensor corresponding to each patient
"""
set_ids = set(all_pids)
for fname in pdict:
set_ids = set_ids.intersection(set(pdict[fname]))
list_ids = list(set_ids)
list_ids.sort()
print ('merge_on_pids: intersection of patient ids is',len(list_ids))
maxT = 0
for fname in ddict:
maxT = np.max((maxT, ddict[fname][0].shape[1]))
data = np.zeros((len(list_ids), maxT, len(pdict.keys())))
obs = np.zeros_like(data)
for f_idx, fname in enumerate(pdict):
pids_f, (data_f, obs_f) = pdict[fname], ddict[fname]
pids_f = list(pids_f)
index_map = [pids_f.index(pid) for pid in list_ids]
data[:,:maxT, f_idx] = data_f[index_map, :maxT]
obs[:,:maxT, f_idx] = obs_f[index_map, :maxT]
print ('merge_on_pids: after merging, pat_ids, data, obs:', len(list_ids), data.shape, obs.shape)
return np.array(list_ids), data, obs
|
d0968de287a1c62ebb7638e5f1af7bd63041665c
| 3,643,191
|
import argparse
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description='Characterize the synapse pulse extender')
parser.add_argument("--syn_pd", dest="syn_pd", type=int, default=SYN_PD, help="Set DAC_SYN_PD bias. Default {}".format(SYN_PD))
args = parser.parse_args()
return args
|
bb616f6fcb2c82575777ba33797350768956834a
| 3,643,192
|
import requests
import numpy
def do_inference(hostport, work_dir, concurrency, num_tests):
"""Tests PredictionService over Tensor-Bridge.
Args:
hostport: Host:port address of the PredictionService.
work_dir: The full path of working directory for test data set.
concurrency: Maximum number of concurrent requests.
num_tests: Number of test images to use.
Returns:
The classification error rate.
Raises:
IOError: An error occurred processing test data set.
"""
test_data_set = mnist_input_data.read_data_sets(work_dir).test
error = 0
for _ in range(num_tests):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'default'
request.model_spec.signature_name = 'predict_images'
image, label = test_data_set.next_batch(1)
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(image[0], shape=[1, image[0].size]))
response = requests.post(hostport + '/tensor-bridge/v1/prediction',
json=MessageToDict(
request,
preserving_proto_field_name=True,
including_default_value_fields=True))
result = ParseDict(response.json(),
predict_pb2.PredictResponse(),
ignore_unknown_fields=True)
scores = numpy.array(
result.outputs['scores'].float_val)
prediction = numpy.argmax(scores)
if label[0] != prediction:
error += 1
return error / num_tests
|
ed2cc97ccaf6e3a8be2ae690b190640c67365f9d
| 3,643,193
|
def checkbox_2D(image, checkbox, debug=False):
"""
Find the course location of an input psf by finding the
brightest checkbox.
This function uses a 2 dimensional image as input, and
finds the the brightest checkbox of given size in the
image.
Keyword arguments:
image -- 2 dimensional psf image
checkbox -- A sliding partial filter that equal the sum
of values in an n x n region centered on the
current pixel, where n is an odd integer.
Output(s):
checkbox_ctr -- A tuple containing the brightest checkbox
location.
checkbox_hfw -- A tuple containing the checkbox halfwidth.
Example usage:
>> cb_cen, cb_hw = checkbox_2D(psf, 5)
Find the location of the brightest checkbox, given a
checkbox size of 5. Returns the brightest checkbox
center and halfwidths.
"""
# Calculate the checkbox half-width
    chw = (checkbox - 1) // 2
# Calculate the image size
xsize, ysize = image.shape[1], image.shape[0]
# Calculate the x and y widths of checkbox region
xwidth, ywidth = xsize - checkbox + 1, ysize - checkbox + 1
# If the checkbox size is not equal to both the X and Y sizes,
# find the pixel with the brightest checkbox
if checkbox != xsize and checkbox != ysize:
xpeak = 0
ypeak = 0
sumpeak = 0
        for ii in range(xsize - checkbox):
            for jj in range(ysize - checkbox):
t = np.sum(image[jj:jj+checkbox, ii:ii+checkbox])
if t > sumpeak:
xpeak = ii + chw + 1
ypeak = jj + chw + 1
sumpeak = t
print('(checkbox_2D): Checkbox not equal to both x/ysize.')
print()
# If the checkbox size is equal to both the X and Y sizes
if checkbox == xsize and checkbox == ysize:
xpeak = xsize / 2
ypeak = ysize / 2
sumpeak = np.sum(image, axis=None)
print('(checkbox_2D): Checkbox equal to x/ysize.')
print()
# Print calculated checkbox center, and sum within checkbox centroid
# Find the checkbox region half-width in x and y
xhw = xwidth / 2
yhw = ywidth / 2
if xpeak < xhw or xpeak > xsize - xhw or ypeak < yhw or ypeak > ysize - yhw:
print('(checkbox_2D): WARNING - Peak too close to edge of image.')
print()
# NOTE: Use this section of the input image is a subset of a larger image
# Not currently needed for this analysis
# # Determine the center of the brightest checkbox, in extracted
# # image coordinates
# xpeak = xpeak + xhw
# ypeak = ypeak + yhw
# Debug messages
if debug:
print('(checkbox_2D): chw = ', chw)
print('(checkbox_2D): xsize, ysize = {}, {}'.format(xsize, ysize))
print('(checkbox_2D): xwidth, ywidth = {}, {}'.format(xwidth, ywidth))
print('(checkbox_2D): xpeak, ypeak = {}, {}'.format(xpeak, ypeak))
print('(checkbox_2D): sumpeak = ', sumpeak)
print('(checkbox_2D): xhw, yhw = {}, {}'.format(xhw, yhw))
print()
checkbox_ctr = np.array((xpeak, ypeak))
checkbox_hfw = np.array((xhw, yhw))
return checkbox_ctr, checkbox_hfw
|
e300cb3c7363686b8f07fd35d740bcbac84f0b06
| 3,643,194
|
def test_true() -> None:
"""This is a test that should always pass. This is just a default test
    to make sure tests run.
Parameters
----------
None
Returns
-------
None
"""
# Always true test.
assert_message = "This test should always pass."
assert True, assert_message
return None
|
f08cb5feb4e450b10b58fe32d751bf45985df84c
| 3,643,195
|
def parseStylesheetFile(filename):
"""Load and parse an XSLT stylesheet"""
ret = libxsltmod.xsltParseStylesheetFile(filename)
    if ret is None: return None
return stylesheet(_obj=ret)
|
9e12e7ec5ace9eafe50e595d544bc09ff7ccef7d
| 3,643,196
|
import torch
def tensor_to_index(tensor: torch.Tensor, dim=1) -> np.ndarray:
    """Converts a tensor to an array of category indices"""
return tensor_to_longs(torch.argmax(tensor, dim=dim))
|
7d72b18086a46c4f1c3f8cebaee28ddac12cf31c
| 3,643,197
|
import glob
import random
import os
import time
import subprocess
import json
import gc
def matchAPKs(sourceAPK, targetAPKs, matchingDepth=1, matchingThreshold=0.67, matchWith=10, useSimiDroid=False, fastSearch=True, matchingTimeout=500, labeling="vt1-vt1", useLookup=False):
"""
Compares and attempts to match two APK's and returns a similarity measure
:param sourceAPK: The path to the source APK (the original app you wish to match)
:type sourceAPK: str
    :param targetAPKs: The path to the directory containing target APKs (against which you wish to match)
    :type targetAPKs: str
    :param matchingDepth: The depth and rigor of the matching (between 1 and 4)
:type matchingDepth: int
:param matchingThreshold: A similarity percentage above which apps are considered similar
:type matchingThreshold: float
    :param matchWith: The number of matchings to return (default: 10)
:type matchWith: int
:param useSimiDroid: Whether to use SimiDroid to perform the comparison
:type useSimiDroid: boolean
    :param fastSearch: Whether to return matchings once the maximum number of matches [matchWith] is reached
:type fastSearch: boolean
:param matchingTimeout: The time (in seconds) to allow the matching process to continue
    :type matchingTimeout: int
:param labeling: The labeling scheme adopted to label APK's as malicious and benign
:type labeling: str
:param useLookup: Whether to skip analyzing every app and depend on lookup structs to hasten the experiments
:type useLookup: boolean
    :return: A list of tuples (str, (float, int)) depicting the matched app, the similarity measure, and the matched app's label
"""
try:
similarity = 0.0
# Get the target apps
targetApps = glob.glob("%s/*" % targetAPKs) if useSimiDroid == False else glob.glob("%s/*.apk" % targetAPKs)
# Randomize?
random.shuffle(targetApps)
if len(targetApps) < 1:
            prettyPrint("Could not retrieve any APK's or directories from \"%s\"" % targetAPKs, "error")
return []
prettyPrint("Successfully retrieved %s apps from \"%s\"" % (len(targetApps), targetAPKs))
# Retrieve information from the source APK
if not useSimiDroid:
sourceKey = sourceAPK[sourceAPK.rfind("/")+1:].replace(".apk", "")
if useLookup:
infoDir = targetApps[0][:targetApps[0].rfind("/")]
if os.path.exists("%s/%s_data" % (infoDir, sourceKey)):
sourceInfo = eval(open("%s/%s_data/data.txt" % (infoDir, sourceKey)).read())
else:
prettyPrint("No lookup info found. Extracting app info", "warning")
sourceInfo = extractAPKInfo(sourceAPK, matchingDepth)[-1]
else:
sourceInfo = extractAPKInfo(sourceAPK, matchingDepth)[-1]
if len(sourceInfo) < 1:
prettyPrint("Could not extract any info from \"%s\"" % sourceAPK, "error")
return []
matchings = {}
counter = 0
startTime = time.time()
for targetAPK in targetApps:
counter += 1
# Timeout?
if counter >= matchingTimeout:
prettyPrint("Matching timeout", "error")
return sortDictByValue(matchings, True)
prettyPrint("Matching with \"%s\", #%s out of %s" % (targetAPK, counter, matchingTimeout), "debug")
if useSimiDroid == False:
# Use homemade recipe to perform the comparison
if not os.path.exists("%s/data.txt" % targetAPK):
prettyPrint("Could not find a \"data.txt\" file for app \"%s\". Skipping" % targetAPK, "warning")
continue
# Load pre-extracted target app information
try:
targetInfo = eval(open("%s/data.txt" % targetAPK).read())
except Exception as e:
prettyPrint("Could not load target info. Skipping", "warning")
continue
# Retrieve the APK's label according to a labeling scheme
targetLabel = -1
targetKey = targetAPK[targetAPK.rfind("/")+1:].replace("_data", "")
if os.path.exists("%s/%s.report" % (VT_REPORTS_DIR, targetKey)):
report = eval(open("%s/%s.report" % (VT_REPORTS_DIR, targetKey)).read())
prettyPrint("VirusTotal report \"%s.report\" found" % targetKey, "debug")
if "positives" in report.keys():
if labeling == "old":
if "additional_info" in report.keys():
if "positives_delta" in report["additional_info"].keys():
targetLabel = 1 if report["positives"] - report["additional_info"]["positives_delta"] >= 1 else 0
else:
continue
if labeling == "vt1-vt1":
targetLabel = 1 if report["positives"] >= 1 else 0
elif labeling == "vt50p-vt50p":
targetLabel = 1 if report["positives"]/float(report["total"]) >= 0.5 else 0
elif labeling == "vt50p-vt1":
if report["positives"]/float(report["total"]) >= 0.5:
targetLabel = 1
elif report["positives"] == 0:
targetLabel = 0
else:
targetLabel = random.randint(0, 1)
# Start the comparison
similarities = []
if matchingDepth >= 1:
if "name" in sourceInfo.keys() and "name" in targetInfo.keys():
similarities.append(stringRatio(sourceInfo["name"], targetInfo["name"]))
if "package" in sourceInfo.keys() and "package" in targetInfo.keys():
similarities.append(stringRatio(sourceInfo["package"], targetInfo["package"]))
if "icon" in sourceInfo.keys() and "icon" in targetInfo.keys():
if sourceInfo["icon"] != None and targetInfo["icon"] != None:
sourceIcon = "%s/tmp_%s/%s" % (sourceAPK[:sourceAPK.rfind("/")], sourceInfo["package"], sourceInfo["icon"])
targetIcon = "%s/%s" % (targetAPK, targetInfo["icon"][targetInfo["icon"].rfind('/')+1:])
if os.path.exists(sourceIcon) and os.path.exists(targetIcon):
similarities.append(simImages(sourceIcon, targetIcon))
if matchingDepth >= 2:
if "activities" in sourceInfo.keys() and "activities" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["activities"], targetInfo["activities"]))
if "permissions" in sourceInfo.keys() and "permissions" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["permissions"], targetInfo["permissions"]))
if "providers" in sourceInfo.keys() and "providers" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["providers"], targetInfo["providers"]))
if "receivers" in sourceInfo.keys() and "receivers" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["receivers"], targetInfo["receivers"]))
if "services" in sourceInfo.keys() and "services" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["services"], targetInfo["services"]))
if "files" in sourceInfo.keys() and "files" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["files"], targetInfo["files"]))
if matchingDepth >= 3:
if "libraries" in sourceInfo.keys() and "libraries" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["libraries"], targetInfo["libraries"]))
if "classes" in sourceInfo.keys() and "classes" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["classes"], targetInfo["classes"]))
if "methods" in sourceInfo.keys() and "methods" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["methods"], targetInfo["methods"]))
if matchingDepth >= 4:
if os.path.exists("%s/%s_data/call_graph.gpickle" % (infoDir, sourceKey)) and os.path.exists("%s/call_graph.gpickle" % targetAPK):
try:
prettyPrint("Loading source graph from \"%s/%s_data/call_graph.gpickle\"" % (infoDir, sourceKey), "debug")
sourceGraph = nx.read_gpickle("%s/%s_data/call_graph.gpickle" % (infoDir, sourceKey))
prettyPrint("Loading target graph from \"%s/call_graph.gpickle\"" % targetAPK, "debug")
targetGraph = nx.read_gpickle("%s/call_graph.gpickle" % targetAPK)
except exceptions.EOFError as e:
prettyPrint("Could not read call source or target graphs. Skipping", "warning")
continue
if fastSearch:
isomorphic = nx.algorithms.could_be_isomorphic(sourceGraph, targetGraph)
else:
isomorphic = nx.algorithms.is_isomorphic(sourceGraph, targetGraph)
if isomorphic:
similarities.append(1.0)
else:
similarities.append(0.0)
else:
# Use SimiDroid to perform comparison
curDir = os.path.abspath(".")
os.chdir(SIMIDROID_DIR)
cmd = "java -jar SimiDroid.jar %s %s" % (sourceAPK, targetAPK)
outFile = "%s-%s.json" % (sourceAPK[sourceAPK.rfind('/')+1:].replace(".apk", ""), targetAPK[targetAPK.rfind("/")+1:].replace(".apk", ""))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
p.communicate()
if not os.path.exists(outFile):
prettyPrint("Could not find SimiDroid output file. Skipping", "warning")
continue
outContent = json.loads(open(outFile).read())
os.chdir(curDir)
if len(similarities) >= 1:
similarity = float(sum(similarities))/float(len(similarities)) if useSimiDroid == False else float(outContent["conclusion"]["simiScore"])
else:
similarity = 0.0
prettyPrint("Similarity score: %s" % similarity)
            # Release the target call graph to free memory
prettyPrint("Releasing object and invoking Garbage Collector", "debug")
targetGraph = None
gc.collect()
if similarity >= matchingThreshold:
prettyPrint("Got a match between source \"%s\" and app \"%s\", with score %s" % (sourceAPK[sourceAPK.rfind("/")+1:].replace(".apk", ""), targetAPK[targetAPK.rfind("/")+1:].replace(".apk", ""), similarity), "output")
if useSimiDroid == False:
matchings[targetInfo["package"]] = (similarity, targetLabel)
else:
matchings[targetAPK] = (similarity, targetLabel)
currentTime = time.time()
if (fastSearch and len(matchings) >= matchWith) or (currentTime - startTime >= matchingTimeout):
# Return what we've got so far
if len(matchings) >= matchWith:
return sortDictByValue(matchings, True)
except Exception as e:
prettyPrintError(e)
return []
return sortDictByValue(matchings, True)
|
a1cf2a9bde0bc0bfcda761097db4cd8f281c8d6f
| 3,643,198
|
def _le_(x: symbol, y: symbol) -> symbol:
"""
>>> isinstance(le_(symbol(3), symbol(2)), symbol)
True
>>> le_.instance(3, 2)
False
"""
return x <= y
|
336b164cbc249a1a9e9a3d965950a52ac01292ab
| 3,643,199
|