content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def determine_clim_by_standard_deviation(color_data, n_std_dev=2.5):
    """Compute color limits as mean +/- ``n_std_dev`` standard deviations.

    Useful when outliers in ``color_data`` would otherwise stretch the color
    scale and obscure most of the data. NaN values are ignored. Returns
    (vmin, vmax), suitable for a plotting routine or plt.clim().
    """
    center = np.nanmean(color_data)
    half_range = n_std_dev * np.nanstd(color_data)
    return center - half_range, center + half_range
def test_TDMA_solver():
    """
    Verify the tridiagonal matrix solver solve_TDMA() on a known system.
    """
    # Tridiagonal coefficients: [sub-, main-, super-diagonal] per row.
    coeffs = [[0, -2.6, 1], [1, -2.6, 1], [1, -2.6, 1], [1, -2.6, 0]]
    rhs = [-240.0, 0.0, 0.0, -150.0]
    # Reference solution of the system above.
    expected = [118.1122, 67.0916, 56.3261, 79.3562]
    # Run the TDMA solver and compare against the reference.
    solution = solve_TDMA(len(rhs), np.asarray(coeffs), np.asarray(rhs))
    assert solution == pytest.approx(expected, abs=1e-4)
def dt(c):
    """
    Delete all saved models from the models folder, then retrain from scratch.
    """
    # Wipe every serialized model, then delegate to the training task t().
    c.run("rm -f models/*", pty=True)
    print("All model files removed.")
    t(c)
def ec_double(point: ECPoint, alpha: int, p: int) -> ECPoint:
    """
    Double an affine point on the elliptic curve y^2 = x^3 + alpha*x + beta mod p.

    The point is given as (x, y) and must have y != 0 (mod p), otherwise the
    tangent line at the point is vertical and doubling is undefined.
    """
    assert point[1] % p != 0
    px, py = point
    # Slope of the tangent to the curve at (px, py).
    slope = div_mod(3 * px * px + alpha, 2 * py, p)
    new_x = (slope * slope - 2 * px) % p
    new_y = (slope * (px - new_x) - py) % p
    return new_x, new_y
def scaled_dot_product_attention(q, k, v, mask):
    """
    Compute scaled dot-product attention.

    q, k, v must have matching leading dimensions, and k, v must share their
    penultimate dimension (seq_len_k == seq_len_v). The mask, if given, must
    be broadcastable for addition onto (..., seq_len_q, seq_len_k); it should
    carry the same batch/head dimensions as q, k, v.

    Args:
        q: query, shape (..., seq_len_q, depth)
        k: key, shape (..., seq_len_k, depth)
        v: value, shape (..., seq_len_v, depth_v)
        mask: float tensor broadcastable to (..., seq_len_q, seq_len_k),
            or None for no masking.

    Returns:
        output (context vectors) of shape (..., seq_len_q, depth_v), and the
        (masked) scaled attention logits of shape (..., seq_len_q, seq_len_k).
    """
    # Raw attention scores: (..., seq_len_q, seq_len_k)
    logits = tf.matmul(q, k, transpose_b=True)
    # Scale by sqrt(key depth) to keep softmax gradients well-behaved.
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = logits / tf.math.sqrt(depth)
    # Drive masked positions toward -inf so softmax gives them ~0 weight.
    if mask is not None:
        logits += (mask * -1e9)
    # Attention weights sum to 1 over the key axis.
    weights = tf.nn.softmax(logits, axis=-1)
    # Weighted sum of values: (..., seq_len_q, depth_v)
    output = tf.matmul(weights, v)
    return output, logits
def _parse_message(message):
    """Split *message* into separator strings and interpolation tags.

    Tags are named tuples representing the string ^^type:name:format^^ and
    they are separated by arbitrary separator text. For example, in
    "123^^node:Foo:${file}^^456^^node:Bar:${line}^^789" there are two tags
    and three separators (the numeric runs).

    Supported tags after node:<node_name>:
        file: replaced with the filename in which the node was defined.
        line: replaced by the line number at which the node was defined.

    Args:
        message: String to parse.

    Returns:
        (list of separator strings, list of _ParseTags). E.g. for
        "123^^node:Foo:${file}^^456" this returns
        (["123", "456"], [_ParseTag("node", "Foo", "${file}")]).
    """
    separators = []
    parsed_tags = []
    remaining = message
    while remaining:
        match = re.match(_INTERPOLATION_PATTERN, remaining)
        if not match:
            break
        separators.append(match.group(1))
        parsed_tags.append(
            _ParseTag(match.group(3), match.group(4), match.group(5)))
        # Consume the matched prefix and continue with the rest.
        remaining = remaining[match.end():]
    # Trailing text (possibly empty) is always appended as a separator.
    separators.append(remaining)
    return separators, parsed_tags
def plot_simp_pat2D():
    """Smoke-test 2D plot of the tangential vector function given by the
    spherical harmonic function Psi in the vsh package."""
    theta, phi, e_theta, e_phi = gen_simp_pat()
    tvecfun.plotvfonsph(theta, phi, e_theta, e_phi)
def model_fn(features, labels, mode, params):
    """TPU estimator model_fn that embeds text blocks with a hub retriever.

    Prediction-only: labels and params are unused.
    """
    del labels, params
    encoder = hub.Module(FLAGS.retriever_module_path)
    encoder_inputs = dict(
        input_ids=features["block_ids"],
        input_mask=features["block_mask"],
        segment_ids=features["block_segment_ids"])
    # "projected" signature yields the projected block embedding.
    block_emb = encoder(inputs=encoder_inputs, signature="projected")
    return tf.estimator.tpu.TPUEstimatorSpec(
        mode=mode, predictions=dict(block_emb=block_emb))
def get_rotated_coords(vec, coords):
    """
    Rotate *coords* so the z-axis aligns with the unit vector *vec*.

    Builds the rotation matrix from the cartesian unit vector and applies
    it to every coordinate row.

    Args:
        vec: unit vector (cartesian) defining the target z-axis
        coords: coordinates to rotate, one point per row

    Returns:
        rotated coordinates with the same shape as *coords*
    """
    rotation = get_rotation_matrix(vec)
    return (rotation @ coords.T).T
def SetDrainFlag(drain_flag):
  """Sets or clears the drain flag for the job queue.

  @type drain_flag: boolean
  @param drain_flag: Whether to set or unset the drain flag
  @attention: This function should only be called by the current holder of the
    queue lock

  """
  getents = runtime.GetEnts()

  if drain_flag:
    # An empty file owned by masterd marks the queue as drained.
    utils.WriteFile(pathutils.JOB_QUEUE_DRAIN_FILE, data="",
                    uid=getents.masterd_uid, gid=getents.daemons_gid,
                    mode=constants.JOB_QUEUE_FILES_PERMS)
  else:
    utils.RemoveFile(pathutils.JOB_QUEUE_DRAIN_FILE)

  # Sanity check: on-disk state must now match the requested flag.
  assert (not drain_flag) ^ CheckDrainFlag()
def journalMethodCall(objectPath: str, methodName: str, args: tuple, kargs: str):
    """Record a user-defined command invocation in the Abaqus journal file.

    For example:

        def setValues(self, **kargs):
            for arg, value in kargs.items():
                setattr(arg, value)
            from abaqus import journalMethodCall
            objPath = '%s[%r]' % (self.reposPath, self.name)
            journalMethodCall(objPath, 'setValues', (), kargs)

    Note: Your command should not call journalMethodCall if the command
    changes the mdb using built-in Abaqus Scripting Interface commands,
    because built-in commands are journaled by default. A command that
    changes the mdb customData is one example of a command that should
    call journalMethodCall.

    Notes
    -----
    This function can be accessed by:

    .. code-block:: python

        journalMethodCall

    Parameters
    ----------
    objectPath
        A String specifying the path to the object.
    methodName
        A String specifying the name of the method.
    args
        A sequence specifying the positional arguments to be written to the journal file.
    kargs
        A Python dict object specifying the keyword arguments to be written to the journal file.
    """
    # Stub: the real implementation is provided by the Abaqus runtime.
    pass
def merge_on_pids(all_pids, pdict, ddict):
    """Merge per-feature data tensors on the intersection of patient ids.

    Args:
        all_pids: list of all patient ids
        pdict: dict indexed by feature name; pdict[fname] is the sequence of
            patient ids for that feature
        ddict: dict indexed by feature name; ddict[fname] is a tuple
            (data, obs) of arrays aligned row-wise with pdict[fname],
            each of shape (num_patients, T)

    Returns:
        (pids, data, obs): sorted array of common patient ids, and data /
        observation tensors of shape (num_pids, maxT, num_features), where
        features shorter than maxT are zero-padded.
    """
    # Intersect patient ids across every feature.
    set_ids = set(all_pids)
    for fname in pdict:
        set_ids = set_ids.intersection(set(pdict[fname]))
    list_ids = sorted(set_ids)
    print('merge_on_pids: intersection of patient ids is', len(list_ids))

    # The widest time dimension across features sizes the output tensors.
    maxT = 0
    for fname in ddict:
        maxT = np.max((maxT, ddict[fname][0].shape[1]))

    data = np.zeros((len(list_ids), maxT, len(pdict.keys())))
    obs = np.zeros_like(data)
    for f_idx, fname in enumerate(pdict):
        pids_f, (data_f, obs_f) = pdict[fname], ddict[fname]
        # O(1) lookups instead of repeated list.index (was O(n^2) overall).
        # setdefault keeps the FIRST occurrence, matching list.index semantics.
        pos = {}
        for i, pid in enumerate(pids_f):
            pos.setdefault(pid, i)
        index_map = [pos[pid] for pid in list_ids]
        data[:, :maxT, f_idx] = data_f[index_map, :maxT]
        obs[:, :maxT, f_idx] = obs_f[index_map, :maxT]
    print('merge_on_pids: after merging, pat_ids, data, obs:',
          len(list_ids), data.shape, obs.shape)
    return np.array(list_ids), data, obs
def dump(columns, fp, name=None, labels=None, formats=None):
    """
    Serialize a SAS compressed transport file format document.

    Intended usage:

        data = {
            'a': [1, 2],
            'b': [3, 4],
        }
        with open('example.cpt', 'wb') as f:
            dump(data, f)
    """
    # Writing compressed transport files is not yet supported.
    raise NotImplementedError()
def parse_args():
    """Parse command line arguments for the pulse-extender characterization."""
    parser = argparse.ArgumentParser(
        description='Characterize the synapse pulse extender')
    parser.add_argument(
        "--syn_pd", dest="syn_pd", type=int, default=SYN_PD,
        help="Set DAC_SYN_PD bias. Default {}".format(SYN_PD))
    return parser.parse_args()
def add_target_resources(document):
    """Add fragmentless target URL values to make search easier."""
    if oajson.is_collection(document):
        # Recurse into every item of a collection.
        for item in document.get(oajson.ITEMS, []):
            add_target_resources(item)
        return
    target = document.get('target')
    if target is None:
        return
    assert TARGET_RESOURCE not in document
    # TODO: support multiple and structured targets
    if not isinstance(target, basestring):
        raise NotImplementedError('multiple/structured targets')
    # Store the target URL with any "#fragment" stripped.
    document[TARGET_RESOURCE] = urlparse.urldefrag(target)[0]
def do_inference(hostport, work_dir, concurrency, num_tests):
    """Tests PredictionService over Tensor-Bridge.

    Args:
      hostport: Host:port address of the PredictionService.
      work_dir: The full path of working directory for test data set.
      concurrency: Maximum number of concurrent requests.
      num_tests: Number of test images to use.

    Returns:
      The classification error rate.

    Raises:
      IOError: An error occurred processing test data set.
    """
    test_set = mnist_input_data.read_data_sets(work_dir).test
    num_errors = 0
    for _ in range(num_tests):
        # Build a prediction request for a single test image.
        req = predict_pb2.PredictRequest()
        req.model_spec.name = 'default'
        req.model_spec.signature_name = 'predict_images'
        image, label = test_set.next_batch(1)
        req.inputs['images'].CopyFrom(
            tf.contrib.util.make_tensor_proto(
                image[0], shape=[1, image[0].size]))
        # POST the request as JSON through the tensor-bridge endpoint.
        payload = MessageToDict(req,
                                preserving_proto_field_name=True,
                                including_default_value_fields=True)
        response = requests.post(
            hostport + '/tensor-bridge/v1/prediction', json=payload)
        result = ParseDict(response.json(),
                           predict_pb2.PredictResponse(),
                           ignore_unknown_fields=True)
        # Count a classification error when argmax score mismatches the label.
        scores = numpy.array(result.outputs['scores'].float_val)
        if label[0] != numpy.argmax(scores):
            num_errors += 1
    return num_errors / num_tests
def checkbox_2D(image, checkbox, debug=False):
    """
    Find the coarse location of an input psf by finding the
    brightest checkbox.

    This function uses a 2 dimensional image as input, and
    finds the brightest checkbox of given size in the image.

    Keyword arguments:
    image -- 2 dimensional psf image
    checkbox -- A sliding partial filter that equal the sum
                of values in an n x n region centered on the
                current pixel, where n is an odd integer.

    Output(s):
    checkbox_ctr -- A tuple containing the brightest checkbox
                    location.
    checkbox_hfw -- A tuple containing the checkbox halfwidth.

    Example usage:

        >> cb_cen, cb_hw = checkbox_2D(psf, 5)

        Find the location of the brightest checkbox, given a
        checkbox size of 5. Returns the brightest checkbox
        center and halfwidths.
    """
    # NOTE(review): xrange and the "/" divisions indicate Python 2; under
    # Python 3, chw/xpeak/xhw become floats and xrange raises NameError —
    # confirm the target interpreter before porting.
    # Calculate the checkbox half-width
    chw = (checkbox - 1) / 2
    # Calculate the image size (shape is (rows=y, cols=x))
    xsize, ysize = image.shape[1], image.shape[0]
    # Calculate the x and y widths of checkbox region
    xwidth, ywidth = xsize - checkbox + 1, ysize - checkbox + 1
    # If the checkbox size is not equal to both the X and Y sizes,
    # exhaustively slide the checkbox and keep the brightest window.
    if checkbox != xsize and checkbox != ysize:
        xpeak = 0
        ypeak = 0
        sumpeak = 0
        for ii in xrange(xsize - checkbox):
            for jj in xrange(ysize - checkbox):
                t = np.sum(image[jj:jj+checkbox, ii:ii+checkbox])
                if t > sumpeak:
                    xpeak = ii + chw + 1
                    ypeak = jj + chw + 1
                    sumpeak = t
        print('(checkbox_2D): Checkbox not equal to both x/ysize.')
        print()
    # If the checkbox size is equal to both the X and Y sizes,
    # the whole image is the single checkbox.
    # NOTE(review): when checkbox equals exactly one of xsize/ysize,
    # neither branch runs and xpeak/ypeak are unbound — confirm callers
    # never pass that case.
    if checkbox == xsize and checkbox == ysize:
        xpeak = xsize / 2
        ypeak = ysize / 2
        sumpeak = np.sum(image, axis=None)
        print('(checkbox_2D): Checkbox equal to x/ysize.')
        print()
    # Print calculated checkbox center, and sum within checkbox centroid
    # Find the checkbox region half-width in x and y
    xhw = xwidth / 2
    yhw = ywidth / 2
    # Warn when the peak sits too close to the image border for a full window.
    if xpeak < xhw or xpeak > xsize - xhw or ypeak < yhw or ypeak > ysize - yhw:
        print('(checkbox_2D): WARNING - Peak too close to edge of image.')
        print()
    # NOTE: Use this section if the input image is a subset of a larger image
    # Not currently needed for this analysis
    # # Determine the center of the brightest checkbox, in extracted
    # # image coordinates
    # xpeak = xpeak + xhw
    # ypeak = ypeak + yhw
    # Debug messages
    if debug:
        print('(checkbox_2D): chw = ', chw)
        print('(checkbox_2D): xsize, ysize = {}, {}'.format(xsize, ysize))
        print('(checkbox_2D): xwidth, ywidth = {}, {}'.format(xwidth, ywidth))
        print('(checkbox_2D): xpeak, ypeak = {}, {}'.format(xpeak, ypeak))
        print('(checkbox_2D): sumpeak = ', sumpeak)
        print('(checkbox_2D): xhw, yhw = {}, {}'.format(xhw, yhw))
        print()
    checkbox_ctr = np.array((xpeak, ypeak))
    checkbox_hfw = np.array((xhw, yhw))
    return checkbox_ctr, checkbox_hfw
def test_true() -> None:
    """A trivial sanity test that always passes.

    Exists only to verify that the test machinery itself runs.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Trivially true assertion; only the test runner is exercised.
    assert True, "This test should always pass."
    return None
def parseStylesheetFile(filename):
    """Load and parse an XSLT stylesheet from *filename*.

    Returns a `stylesheet` wrapper object, or None if parsing failed.
    """
    ret = libxsltmod.xsltParseStylesheetFile(filename)
    # Idiom fix: identity comparison with None, not equality.
    if ret is None:
        return None
    return stylesheet(_obj=ret)
def tensor_to_index(tensor: torch.Tensor, dim: int = 1) -> np.ndarray:
    """Convert a tensor of class scores to an array of category indices.

    Takes the argmax along *dim* and converts the result via
    tensor_to_longs. (Annotation fixed: torch.Tensor is the tensor class;
    torch.tensor is the factory function.)
    """
    return tensor_to_longs(torch.argmax(tensor, dim=dim))
def write_ef_first_stage_solution(ef,
                                  solution_file_name,
                                  first_stage_solution_writer=first_stage_nonant_writer):
    """
    Write the first-stage solution of a solved EF to solution_file_name.

    Args:
        ef : A Concrete Model of the Extensive Form (output of create_EF).
            We assume it has already been solved.
        solution_file_name : filename to write the solution to
        first_stage_solution_writer (optional) : custom first stage solution
            writer function

    NOTE:
        This utility is replicating WheelSpinner.write_first_stage_solution
        for EF.
    """
    # Under MPI, only rank 0 writes the file.
    if haveMPI and global_rank != 0:
        return
    dirname = os.path.dirname(solution_file_name)
    if dirname != '':
        os.makedirs(dirname, exist_ok=True)
    # First-stage (nonanticipative) variables agree across scenarios,
    # so any one scenario is representative.
    representative_scenario = getattr(ef, ef._ef_scenario_names[0])
    first_stage_solution_writer(solution_file_name,
                                representative_scenario,
                                bundling=False)
def create_superuser(site):
    """Create an initial superuser for the site.

    Prompts for a username, password, and e-mail address for the initial
    superuser account. If a superuser already exists (e.g. when re-running
    this script on an existing database), it is listed for reference along
    with instructions on creating a new one instead.

    Args:
        site (reviewboard.cmdline.rbsite.Site):
            The site to create the superuser on.
    """
    from django.contrib.auth.management import get_default_username
    from django.contrib.auth.models import User

    page = ui.page('Set up a superuser account')

    existing_admins = list(
        User.objects.filter(is_superuser=True)
        .values_list('username', flat=True)
    )

    if existing_admins:
        ui.text(page,
                'Existing admin account(s) were found: %s'
                % ', '.join(existing_admins))
        ui.text(page,
                'To create a new one, run `./reviewboard/manage.py '
                'createsuperuser`')
    else:
        ui.text(page,
                "Now you'll need to set up a superuser (an admin account). "
                "This will be used to log in and configure Review Board.")

        # Collect credentials; each answer is stored on the site object and
        # consumed by site.create_admin_user() below.
        prompts = (
            ('Username', dict(default=get_default_username() or 'admin',
                              save_var='admin_user')),
            ('Password', dict(password=True,
                              save_var='admin_password')),
            ('Confirm Password', dict(password=True,
                                      save_var='reenter_admin_password')),
            ('E-Mail Address', dict(save_var='admin_email')),
        )

        for label, options in prompts:
            ui.prompt_input(page, label, save_obj=site, **options)

        site.create_admin_user()
def _process_json(outdata, **kwargs):
"""Function: _process_json
Description: Private function for chk_slv_time(). Process JSON data.
Arguments:
(input) outdata -> JSON document of Check Slave Time output.
(input) **kwargs:
ofile -> file name - Name of output file.
db_tbl -> database:collection - Name of db and collection.
class_cfg -> Server class configuration settings.
mail -> Mail instance.
sup_std -> Suppress standard out.
mode -> File write mode.
indent -> Indentation level for JSON document.
"""
indent = kwargs.get("indent", None)
jdata = json.dumps(outdata, indent=indent)
mongo_cfg = kwargs.get("class_cfg", None)
db_tbl = kwargs.get("db_tbl", None)
ofile = kwargs.get("ofile", None)
mail = kwargs.get("mail", None)
mode = kwargs.get("mode", "w")
if mongo_cfg and db_tbl:
dbn, tbl = db_tbl.split(":")
status = mongo_libs.ins_doc(mongo_cfg, dbn, tbl, outdata)
if not status[0]:
print("\n_process_json: Error Detected: %s" % (status[1]))
if ofile:
gen_libs.write_file(ofile, mode, jdata)
if mail:
mail.add_2_msg(jdata)
mail.send_mail()
if not kwargs.get("sup_std", False):
gen_libs.print_data(jdata) | 5,335,122 |
def matchAPKs(sourceAPK, targetAPKs, matchingDepth=1, matchingThreshold=0.67, matchWith=10, useSimiDroid=False, fastSearch=True, matchingTimeout=500, labeling="vt1-vt1", useLookup=False):
    """
    Compares a source APK against a directory of target APK's and returns the best matches
    :param sourceAPK: The path to the source APK (the original app you wish to match)
    :type sourceAPK: str
    :param targetAPKs: The path to the directory containing target APKs (against which you wish to match)
    :type targetAPKs: str
    :param matchingDepth: The depth and rigor of the matching (between 1 and 4)
    :type matchingDepth: int
    :param matchingThreshold: A similarity percentage above which apps are considered similar
    :type matchingThreshold: float
    :param matchWith: The number of matchings to return (default: 10)
    :type matchWith: int
    :param useSimiDroid: Whether to use SimiDroid to perform the comparison
    :type useSimiDroid: boolean
    :param fastSearch: Whether to return matchings once the maximum number of matches [matchWith] is reached
    :type fastSearch: boolean
    :param matchingTimeout: The time (in seconds) to allow the matching process to continue
    :type matchingTimeout: int
    :param labeling: The labeling scheme adopted to label APK's as malicious and benign
    :type labeling: str
    :param useLookup: Whether to skip analyzing every app and depend on lookup structs to hasten the experiments
    :type useLookup: boolean
    :return: A list of tuples (str, (float, float)) depicting the matched app, the similarity measure and the matched app's label
    """
    try:
        similarity = 0.0
        # Get the target apps (SimiDroid compares raw APKs; the homemade
        # recipe compares pre-extracted per-app data directories).
        targetApps = glob.glob("%s/*" % targetAPKs) if useSimiDroid == False else glob.glob("%s/*.apk" % targetAPKs)
        # Randomize?
        random.shuffle(targetApps)
        if len(targetApps) < 1:
            prettyPrint("Could not retrieve any APK's or directories from \"%s\"" % targetApps, "error")
            return []

        prettyPrint("Successfully retrieved %s apps from \"%s\"" % (len(targetApps), targetAPKs))
        # Retrieve information from the source APK
        if not useSimiDroid:
            sourceKey = sourceAPK[sourceAPK.rfind("/")+1:].replace(".apk", "")
            if useLookup:
                infoDir = targetApps[0][:targetApps[0].rfind("/")]
                if os.path.exists("%s/%s_data" % (infoDir, sourceKey)):
                    # NOTE(review): eval() on file contents trusts the data
                    # directory entirely; unsafe on untrusted input.
                    sourceInfo = eval(open("%s/%s_data/data.txt" % (infoDir, sourceKey)).read())
                else:
                    prettyPrint("No lookup info found. Extracting app info", "warning")
                    sourceInfo = extractAPKInfo(sourceAPK, matchingDepth)[-1]
            else:
                sourceInfo = extractAPKInfo(sourceAPK, matchingDepth)[-1]

            if len(sourceInfo) < 1:
                prettyPrint("Could not extract any info from \"%s\"" % sourceAPK, "error")
                return []

        matchings = {}
        counter = 0
        startTime = time.time()
        for targetAPK in targetApps:
            counter += 1
            # Timeout?
            # NOTE(review): this compares the app *counter* (not elapsed
            # seconds) against matchingTimeout — confirm intended; the
            # elapsed-time check happens further below.
            if counter >= matchingTimeout:
                prettyPrint("Matching timeout", "error")
                return sortDictByValue(matchings, True)

            prettyPrint("Matching with \"%s\", #%s out of %s" % (targetAPK, counter, matchingTimeout), "debug")
            if useSimiDroid == False:
                # Use homemade recipe to perform the comparison
                if not os.path.exists("%s/data.txt" % targetAPK):
                    prettyPrint("Could not find a \"data.txt\" file for app \"%s\". Skipping" % targetAPK, "warning")
                    continue
                # Load pre-extracted target app information
                try:
                    targetInfo = eval(open("%s/data.txt" % targetAPK).read())
                except Exception as e:
                    prettyPrint("Could not load target info. Skipping", "warning")
                    continue

                # Retrieve the APK's label according to a labeling scheme
                targetLabel = -1
                targetKey = targetAPK[targetAPK.rfind("/")+1:].replace("_data", "")
                if os.path.exists("%s/%s.report" % (VT_REPORTS_DIR, targetKey)):
                    report = eval(open("%s/%s.report" % (VT_REPORTS_DIR, targetKey)).read())
                    prettyPrint("VirusTotal report \"%s.report\" found" % targetKey, "debug")
                    if "positives" in report.keys():
                        if labeling == "old":
                            # Label from scan results at submission time
                            # (current positives minus the delta since then).
                            if "additional_info" in report.keys():
                                if "positives_delta" in report["additional_info"].keys():
                                    targetLabel = 1 if report["positives"] - report["additional_info"]["positives_delta"] >= 1 else 0
                                else:
                                    continue
                        if labeling == "vt1-vt1":
                            # Malicious if at least one scanner flagged it.
                            targetLabel = 1 if report["positives"] >= 1 else 0
                        elif labeling == "vt50p-vt50p":
                            # Malicious if at least half of the scanners flagged it.
                            targetLabel = 1 if report["positives"]/float(report["total"]) >= 0.5 else 0
                        elif labeling == "vt50p-vt1":
                            # Malicious >= 50%, benign at 0; ambiguous cases
                            # get a random label.
                            if report["positives"]/float(report["total"]) >= 0.5:
                                targetLabel = 1
                            elif report["positives"] == 0:
                                targetLabel = 0
                            else:
                                targetLabel = random.randint(0, 1)

                # Start the comparison; each depth adds more features to the
                # similarity average.
                similarities = []
                if matchingDepth >= 1:
                    if "name" in sourceInfo.keys() and "name" in targetInfo.keys():
                        similarities.append(stringRatio(sourceInfo["name"], targetInfo["name"]))
                    if "package" in sourceInfo.keys() and "package" in targetInfo.keys():
                        similarities.append(stringRatio(sourceInfo["package"], targetInfo["package"]))
                    if "icon" in sourceInfo.keys() and "icon" in targetInfo.keys():
                        if sourceInfo["icon"] != None and targetInfo["icon"] != None:
                            sourceIcon = "%s/tmp_%s/%s" % (sourceAPK[:sourceAPK.rfind("/")], sourceInfo["package"], sourceInfo["icon"])
                            targetIcon = "%s/%s" % (targetAPK, targetInfo["icon"][targetInfo["icon"].rfind('/')+1:])
                            if os.path.exists(sourceIcon) and os.path.exists(targetIcon):
                                similarities.append(simImages(sourceIcon, targetIcon))

                if matchingDepth >= 2:
                    if "activities" in sourceInfo.keys() and "activities" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["activities"], targetInfo["activities"]))
                    if "permissions" in sourceInfo.keys() and "permissions" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["permissions"], targetInfo["permissions"]))
                    if "providers" in sourceInfo.keys() and "providers" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["providers"], targetInfo["providers"]))
                    if "receivers" in sourceInfo.keys() and "receivers" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["receivers"], targetInfo["receivers"]))
                    if "services" in sourceInfo.keys() and "services" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["services"], targetInfo["services"]))
                    if "files" in sourceInfo.keys() and "files" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["files"], targetInfo["files"]))

                if matchingDepth >= 3:
                    if "libraries" in sourceInfo.keys() and "libraries" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["libraries"], targetInfo["libraries"]))
                    if "classes" in sourceInfo.keys() and "classes" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["classes"], targetInfo["classes"]))
                    if "methods" in sourceInfo.keys() and "methods" in targetInfo.keys():
                        similarities.append(listsRatio(sourceInfo["methods"], targetInfo["methods"]))

                if matchingDepth >= 4:
                    # NOTE(review): infoDir is only bound when useLookup is
                    # True; this branch raises NameError otherwise — confirm
                    # callers always pass useLookup=True with matchingDepth >= 4.
                    if os.path.exists("%s/%s_data/call_graph.gpickle" % (infoDir, sourceKey)) and os.path.exists("%s/call_graph.gpickle" % targetAPK):
                        try:
                            prettyPrint("Loading source graph from \"%s/%s_data/call_graph.gpickle\"" % (infoDir, sourceKey), "debug")
                            sourceGraph = nx.read_gpickle("%s/%s_data/call_graph.gpickle" % (infoDir, sourceKey))
                            prettyPrint("Loading target graph from \"%s/call_graph.gpickle\"" % targetAPK, "debug")
                            targetGraph = nx.read_gpickle("%s/call_graph.gpickle" % targetAPK)
                        except exceptions.EOFError as e:
                            prettyPrint("Could not read call source or target graphs. Skipping", "warning")
                            continue
                        # could_be_isomorphic is a cheap necessary-condition
                        # check; is_isomorphic is exact but far slower.
                        if fastSearch:
                            isomorphic = nx.algorithms.could_be_isomorphic(sourceGraph, targetGraph)
                        else:
                            isomorphic = nx.algorithms.is_isomorphic(sourceGraph, targetGraph)
                        if isomorphic:
                            similarities.append(1.0)
                        else:
                            similarities.append(0.0)
            else:
                # Use SimiDroid to perform comparison
                curDir = os.path.abspath(".")
                os.chdir(SIMIDROID_DIR)
                cmd = "java -jar SimiDroid.jar %s %s" % (sourceAPK, targetAPK)
                outFile = "%s-%s.json" % (sourceAPK[sourceAPK.rfind('/')+1:].replace(".apk", ""), targetAPK[targetAPK.rfind("/")+1:].replace(".apk", ""))
                p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
                p.communicate()
                if not os.path.exists(outFile):
                    prettyPrint("Could not find SimiDroid output file. Skipping", "warning")
                    continue
                outContent = json.loads(open(outFile).read())
                os.chdir(curDir)

            # Overall similarity is the mean of all collected feature scores.
            if len(similarities) >= 1:
                similarity = float(sum(similarities))/float(len(similarities)) if useSimiDroid == False else float(outContent["conclusion"]["simiScore"])
            else:
                similarity = 0.0
            prettyPrint("Similarity score: %s" % similarity)
            # Delete targetInfo to free memory?
            prettyPrint("Releasing object and invoking Garbage Collector", "debug")
            targetGraph = None
            gc.collect()
            if similarity >= matchingThreshold:
                prettyPrint("Got a match between source \"%s\" and app \"%s\", with score %s" % (sourceAPK[sourceAPK.rfind("/")+1:].replace(".apk", ""), targetAPK[targetAPK.rfind("/")+1:].replace(".apk", ""), similarity), "output")
                if useSimiDroid == False:
                    matchings[targetInfo["package"]] = (similarity, targetLabel)
                else:
                    matchings[targetAPK] = (similarity, targetLabel)

            currentTime = time.time()
            # NOTE(review): on wall-clock timeout with fewer than matchWith
            # matches, the inner condition fails and the loop keeps running —
            # confirm whether the timeout should return unconditionally.
            if (fastSearch and len(matchings) >= matchWith) or (currentTime - startTime >= matchingTimeout):
                # Return what we've got so far
                if len(matchings) >= matchWith:
                    return sortDictByValue(matchings, True)

    except Exception as e:
        prettyPrintError(e)
        return []

    return sortDictByValue(matchings, True)
def _le_(x: symbol, y: symbol) -> symbol:
    """Less-than-or-equal comparison lifted to symbols.

    >>> isinstance(le_(symbol(3), symbol(2)), symbol)
    True
    >>> le_.instance(3, 2)
    False
    """
    result = x <= y
    return result
def convert_country_codes(source_codes: List[str], source_format: str, target_format: str,
                          throw_error: bool = False) -> List[str]:
    """
    Convert country codes, e.g., from ISO_2 to full name.

    Parameters
    ----------
    source_codes: List[str]
        List of codes to convert.
    source_format: str
        Format of the source codes (alpha_2, alpha_3, name, ...)
    target_format: str
        Format to which code must be converted (alpha_2, alpha_3, name, ...)
    throw_error: bool (default: False)
        Whether to throw an error if an attribute does not exist.

    Returns
    -------
    target_codes: List[str]
        List of converted codes (np.nan for codes that could not be
        converted when throw_error is False).
    """
    converted = []
    for source_code in source_codes:
        try:
            country = pyc.countries.get(**{source_format: source_code})
            if country is None:
                raise KeyError(f"Data is not available for code {source_code} of type {source_format}.")
            converted.append(getattr(country, target_format))
        except (KeyError, AttributeError) as error:
            if throw_error:
                raise error
            # Record a missing value and continue with the next code.
            converted.append(np.nan)
    return converted
def test_cartpole_dynamics_deviation():
    """Difference between analytical dynamics model and ground truth model.
    """
    import matplotlib.pyplot as plt
    from munch import munchify
    from safe_il.envs.cartpole import CartPole

    config = munchify({
        "seed": 1234,
        "env_config": {
            "normalized_action": True,
        },
    })
    env = CartPole(**config.env_config)
    env.seed(config.seed)

    obs, info = env.reset()
    model_obs = torch.as_tensor(obs)
    model = env.model

    # Roll out both the true env and the analytical model with zero actions,
    # recording the per-step state deviation.
    deviations = []
    for _ in range(1000):
        action = env.action_space.sample() * 0
        obs, _, done, info = env.step(action)
        model_obs = model(model_obs, torch.as_tensor(action))
        deviations.append(obs - model_obs.numpy())
        if done:
            break
    env.close()

    # One subplot per state dimension (x, x_dot, theta, theta_dot).
    steps = list(range(len(deviations)))
    for state_idx in range(4):
        plt.subplot(2, 2, state_idx + 1)
        plt.plot(steps, [float(d[state_idx]) for d in deviations])
    plt.savefig("figures/test_cartpole_dynamics_deviation.png")
    plt.show()
def _test_request(op):
    """Make a request to a wsgiref.simple_server and attempt to call
    op(req) in the application. Succeed if the operation does not
    time out."""
    application = _make_test_app(op)
    server = _make_test_server(application)

    # Serve exactly one request on a background daemon thread.
    handler = threading.Thread(target=server.handle_request)
    handler.setDaemon(True)
    handler.start()

    url = "http://localhost:%d/" % server.server_port
    try:
        resp = urllib2.urlopen(url, timeout=3)
        assert resp.read() == "ok"
    finally:
        server.socket.close()
        handler.join(1)
        if handler.isAlive():
            log.debug('worker is hanged')
def get_simple_lca_length(std_tree, test_gold_dict, node1, node2):
    """
    Map node2 onto the std tree via test_gold_dict and compute the LCA
    (lowest common ancestor) path length between node1 and that node.

    The std tree must be LCA-initialized beforehand, e.g.:
        std_tree.get_lca_preprocess()

    Exception:
        Exception("[Error: ] std has not been lca initialized yet")
        is raised when the std tree was not preprocessed.
    """
    if std_tree.depth_array is None:
        raise Exception("[Error: ] std has not been lca initialized yet")

    id_node_dict = std_tree.get_id_node_dict()
    std_node1 = node1
    std_node2 = test_gold_dict[node2]
    if std_node2 is None:
        raise Exception("[Error: ]Can not find the corresponding node in std tree. ")

    lca_id = std_tree.get_lca(std_node1.get_id(), std_node2.get_id())
    if lca_id == -1:
        # No common ancestor: treat the distance as infinite.
        return config_utils.DINF

    lca_node = id_node_dict[lca_id]
    # Path length = depth(n1) + depth(n2) - 2 * depth(lca).
    return std_node1.root_length + std_node2.root_length - 2 * lca_node.root_length
def test_popleft_child_existing_deque():
    """Testing popleft_child method on an existing deque."""
    # popleft_child on a deque built from [1, 2, 3, 5] yields 5.
    assert Deque([1, 2, 3, 5]).popleft_child() == 5
def test_check_retry_valid():
    """
    Test that a retry is valid if the maximum number of retries has not been reached
    """
    handler = RetryHandler()
    options = handler.get_retry_options({})
    # Attempt 0 is always below the retry limit.
    assert handler.check_retry_valid(options, 0)
def edit_catagory(catagory_id):
    """Rename an existing catalog entry for the current guest."""
    name = request.form.get('name')
    guest_id = session['guest_id']

    # Reject a name already used by this guest.
    duplicate = db.session.query(Catalogs).filter_by(
        name=name, guest_id=guest_id).scalar()
    if duplicate:
        return abort(404)
    # An empty name is a no-op: just go back home.
    if name == '':
        return redirect(url_for('home.home'))

    catagory = db.session.query(Catalogs).filter_by(id=catagory_id).one()
    oldname = catagory.name
    catagory.name = name
    db.session.add(catagory)
    db.session.commit()
    flash(f"Catagory {oldname} has been updated to {catagory.name}")
    return redirect(url_for('home.home'))
def match_patterns(name, name_w_pattern, patterns):
    """Match patterns to filename.

    Given a SPICE kernel name, a SPICE Kernel name with patterns, and the
    possible patterns, provide a dictionary with the patterns as keys and
    the patterns values as value after matching it between the SPICE Kernel
    name with patterns and without patterns.

    For example, given the following arguments:

     * name: ``insight_v01.tm``
     * name_w_pattern: ``insight_v$VERSION.tm``

    The function will return: ``{VERSION: '01'}``

    :param name: Name of the SPICE Kernel
    :type name: str
    :param name_w_pattern: Name of the SPICE Kernel with patterns
    :type name_w_pattern: str
    :param patterns: List of the possible patterns present in the
                     SPICE Kernel name with patterns; each item is a mapping
                     with ``"#text"`` (pattern name) and ``"@length"`` keys
    :type patterns: list
    :return: Dictionary providing the patterns and their value as defined
             by the SPICE kernel
    :rtype: dict
    :raises ValueError: if ``name`` does not match ``name_w_pattern``
    """
    #
    # This list will help us determine the order of the patterns in the file
    # name because later on the patterns need to be correlated with the
    # pattern values.
    #
    pattern_name_order = {}
    name_check = name_w_pattern

    for pattern in patterns:
        pattern_name_order[pattern["#text"]] = name_w_pattern.find(pattern["#text"])
        # Replace "$PATTERN" with as many '$' markers as the pattern's
        # declared length so both names can be compared position by position.
        name_check = name_check.replace(
            "$" + pattern["#text"], "$" * int(pattern["@length"])
        )

    #
    # Convert the pattern_name_order dictionary into an ordered list,
    # sorted by each pattern's position in the name.
    #
    pattern_name_order = list(
        {
            k: v
            for k, v in sorted(pattern_name_order.items(), key=lambda item: item[1])
        }.keys()
    )

    #
    # Generate a list of values extracted from the comparison of the
    # original file and the file with patterns.
    #
    values_list = []
    value = ""
    value_bool = False

    for i in range(len(name_check)):
        if (name_check[i] == name[i]) and (not value_bool):
            continue
        if (name_check[i] == name[i]) and value_bool:
            # Pattern span just ended: flush the collected value.
            value_bool = False
            values_list.append(value)
            value = ""
        elif (name_check[i] == "$") and (not value_bool):
            value_bool = True
            value += name[i]
        elif (name_check[i] == "$") and value_bool:
            value += name[i]
        else:
            # BUG FIX: this was a bare ``raise`` with no active exception,
            # which surfaced as a confusing RuntimeError.
            raise ValueError(
                f"Kernel name '{name}' does not match the pattern '{name_w_pattern}'"
            )
    # BUG FIX: a pattern reaching the very end of the name was previously
    # dropped because the trailing value was never flushed after the loop.
    if value_bool and value:
        values_list.append(value)

    #
    # Correlate the values with their position in the file name with
    # patterns.
    #
    values = {}
    for i in range(len(values_list)):
        values[pattern_name_order[i]] = values_list[i]

    return values
def _generate_good_delivery_token_email(request, good_delivery, msg=''):
    """
    Send an email to the user with the good_delivery activation URL
    and return the token.

    :type request: HttpRequest
    :type good_delivery: GoodDelivery
    :type msg: String

    :param request: current HttpRequest
    :param good_delivery: good delivery to confirm
    :param msg: message to send

    :return: generated token, or None when the user has no email address
    """
    if not good_delivery.delivered_to.email:
        # BUG FIX: without an address no token is ever built; returning
        # early avoids falling through to ``return token`` with ``token``
        # unbound (NameError).
        return None
    # build good_delivery jwt
    token = good_delivery.build_jwt()
    # build absolute URI, attach token and send email
    uri = request.build_absolute_uri(reverse('good_delivery:user_use_token'))
    mail_params = {'hostname': settings.HOSTNAME,
                   'user': good_delivery.delivered_to,
                   'url': '{}?token={}'.format(uri, token),
                   'added_text': msg
                   }
    m_subject = _('{} - {}').format(settings.HOSTNAME, good_delivery)
    send_custom_mail(subject=m_subject,
                     recipients=[good_delivery.delivered_to],
                     body=settings.NEW_DELIVERY_WITH_TOKEN_CREATED,
                     params=mail_params)
    return token
def test_fileinrewriterstep_in_and_out_with_formatting():
    """File rewriter step instantiates with in and out applies formatting."""
    context = Context({'k1': 'v1',
                       'root': {'in': 'inpath{k1}here',
                                'out': 'outpath{k1}here'}})
    step = FileInRewriterStep('blah.name', 'root', context)
    # {k1} placeholders must be substituted in both paths.
    assert step.path_in == 'inpathv1here'
    assert step.path_out == 'outpathv1here'
    assert step.context == context
    assert step.logger.name == 'blah.name'
def get_flavor(disk=None, min_disk=None, min_ram=None, name=None, ram=None, region=None, rx_tx_factor=None, swap=None, vcpus=None):
    """
    Use this data source to get the ID of an available OpenStack flavor.
    """
    # Map the snake_case arguments onto the camelCase names the invoke
    # endpoint expects.
    __args__ = {
        'disk': disk,
        'minDisk': min_disk,
        'minRam': min_ram,
        'name': name,
        'ram': ram,
        'region': region,
        'rxTxFactor': rx_tx_factor,
        'swap': swap,
        'vcpus': vcpus,
    }
    __ret__ = pulumi.runtime.invoke('openstack:compute/getFlavor:getFlavor', __args__)
    return GetFlavorResult(
        is_public=__ret__.get('isPublic'),
        region=__ret__.get('region'),
        id=__ret__.get('id'))
def deal_hands(deck: Deck) -> Tuple[Deck, Deck, Deck, Deck]:
    """Deal the cards in the deck into four hands, one card at a time."""
    # Hand i receives every fourth card starting from position i.
    return tuple(deck[start::4] for start in range(4))
def add_new_publication_group(project):
    """
    Create a new publication_group
    POST data MUST be in JSON format
    POST data SHOULD contain the following:
    name: name for the group
    published: publication status for the group, 0 meaning unpublished
    """
    payload = request.get_json()
    if not payload:
        return jsonify({"msg": "No data provided."}), 400
    groups = get_table("publication_group")
    connection = db_engine.connect()
    insert = groups.insert()
    new_group = {
        "name": payload.get("name", None),
        "published": payload.get("published", 0)
    }
    try:
        insert_result = connection.execute(insert, **new_group)
        new_id = insert_result.inserted_primary_key[0]
        # Read the freshly inserted row back so the response echoes it.
        row_query = select([groups]).where(groups.c.id == new_id)
        new_row = dict(connection.execute(row_query).fetchone())
        return jsonify({
            "msg": "Created new group with ID {}".format(new_id),
            "row": new_row
        }), 201
    except Exception as e:
        return jsonify({
            "msg": "Failed to create new group",
            "reason": str(e)
        }), 500
    finally:
        connection.close()
def basic_demo():
    """
    Enable this to be run as a CLI script, as well as used as a library.
    Mostly intended for testing or a basic demo.
    """
    # Get the command-line arguments
    parser = argparse.ArgumentParser(description='Perform SNMP discovery on a host, \
            returning its data in a single structure.')
    parser.add_argument('--hostname',
                        type=str,
                        action='store',
                        default='localhost',
                        help='The hostname or address to perform discovery on. Default: localhost')
    parser.add_argument('--community',
                        type=str,
                        action='store',
                        dest='community',
                        default='public',
                        help='SNMP v2 community string. Default: public')
    parser.add_argument('--file',
                        type=str,
                        action='store',
                        dest='filepath',
                        default=None,
                        help='Filepath to write the results to. Default: STDOUT.')
    parser.add_argument('--debug', action='store_true', help='Enable debug logging')
    args = parser.parse_args()
    # Choose a log level: debug when requested, normal when writing to a
    # file, and warnings-only when results go to STDOUT so the user does
    # not have to filter INFO noise out of the output.
    if args.debug:
        logger = create_logger(loglevel="debug")
    elif args.filepath:
        logger = create_logger()
    else:
        logger = create_logger(loglevel="warning")
    # Perform SNMP discovery on a device, sending the result to STDOUT or
    # a file, depending on what the user told us.
    if args.filepath:
        netdescribe.files.snmp_to_json(args.hostname, args.community, args.filepath, logger)
    else:
        netdescribe.stdout.snmp_to_json(args.hostname, args.community, logger)
def test_gmres_against_graph_scipy(n, tensor_type, dtype, error, preconditioner, solve_method):
    """
    Feature: ALL TO ALL
    Description: test cases for [N x N] X [N X 1]
    Expectation: the result match scipy in graph

    Compares MindSpore's sparse GMRES solver against SciPy's on the same
    random full-rank system, both as a plain call and wrapped inside a
    ``nn.Cell`` executed in graph mode.
    """
    if not _is_valid_platform(tensor_type):
        return
    # Input CSRTensor of gmres in mindspore graph mode is not supported, just ignored it.
    if tensor_type == "CSRTensor":
        return

    # Thin Cell wrapper so gmres can also be exercised inside a graph-mode
    # construct() (not only as an eager call).
    class TestNet(nn.Cell):
        def __init__(self, solve_method):
            super(TestNet, self).__init__()
            self.solve_method = solve_method

        def construct(self, a, b, x0, tol, restart, maxiter, m, atol):
            return msp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter, M=m,
                                           atol=atol, solve_method=self.solve_method)
    # Fixed seed keeps the random system (and thus the comparison) reproducible.
    onp.random.seed(0)
    a = create_full_rank_matrix((n, n), dtype)
    b = onp.random.rand(n).astype(dtype)
    x0 = onp.zeros_like(b).astype(dtype)
    m = _fetch_preconditioner(preconditioner, a)
    # Tolerances are tied to the dtype's machine epsilon.
    tol = float(onp.finfo(dtype=dtype).eps)
    atol = tol
    restart = n
    maxiter = None
    # Reference solution computed with SciPy on the NumPy inputs.
    scipy_output, _ = osp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter, M=m, atol=atol)
    # Graph Mode
    context.set_context(mode=context.GRAPH_MODE)
    a = to_tensor((a, tensor_type))
    b = Tensor(b)
    x0 = Tensor(x0)
    m = to_tensor((m, tensor_type)) if m is not None else m
    # Not in graph's construct
    ms_output, _ = msp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter,
                                           M=m, atol=atol)
    assert onp.allclose(scipy_output, ms_output.asnumpy(), rtol=error, atol=error)
    # With in graph's construct
    ms_net_output, _ = TestNet(solve_method)(a, b, x0, tol, restart, maxiter, m, atol)
    assert onp.allclose(scipy_output, ms_net_output.asnumpy(), rtol=error, atol=error)
def wtime() -> float:
    """Return the current MPI wall-clock time in seconds as a float."""
    now = MPI.Wtime()
    return now
def test_filter_syncing_pools():
    """test filter_syncing_pools
    """
    mocked_module = flexmock(zfssnapshot)
    # Second pool reports as syncing and must be filtered out.
    mocked_module.should_receive('is_syncing').and_return(
        False, True, False).one_by_one()
    remaining = mocked_module.filter_syncing_pools(['tank/foo',
                                                    'tank/bar',
                                                    'deadweight/foo',
                                                    'deadweight/bar',
                                                    'tan/deadweight'])
    assert_equal(remaining, ['tank/foo', 'tank/bar', 'tan/deadweight'])
def get_paginated_results(func, key, **kwargs):
    """
    Many boto3 methods return only a limited number of results at once,
    with pagination information. This function handles the pagination to
    retrieve the entire result set.

    @param func
        The function to call to get the data.
    @param key
        The key in the result set returned from the function containing the data
        that we want.
    @param kwargs
        The arguments to pass to each call to func.
    @returns
        A generator yielding the results of the consecutive calls to func.
    """
    next_token = ''
    more = True
    while more:
        result = func(**kwargs, nextToken=next_token)
        next_token = result.get('nextToken', None)
        # Stop once the service omits the pagination token.
        # (was ``next_token != None`` — identity comparison is the idiom)
        more = next_token is not None
        yield from result[key]
def survival_df(data, t_col="t", e_col="e", label_col="Y", exclude_col=[]):
    """
    Transform original DataFrame to survival dataframe that would be used in model
    training or predicting.

    Parameters
    ----------
    data: DataFrame
        Survival data to be transformed.
        NOTE(review): this DataFrame is modified in place — the ``label_col``
        column is written onto the caller's object; confirm callers expect
        this side effect.
    t_col: str
        Column name of data indicating time.
    e_col: str
        Column name of data indicating events or status (0 = censored).
    label_col: str
        Name of new label in transformed survival data.
    exclude_col: list
        Columns to be excluded.
        NOTE(review): mutable default argument — safe only as long as this
        list is never mutated inside the function (it currently is not).

    Returns
    -------
    DataFrame:
        Transformed survival data. Negative values in label are taken as right censored.
    """
    # Feature columns: everything except time, event and excluded columns.
    x_cols = [c for c in data.columns if c not in [t_col, e_col] + exclude_col]
    # Negative values are taken as right censored
    data.loc[:, label_col] = data.loc[:, t_col]
    # Negate the time for censored rows (event == 0) to encode censoring.
    data.loc[data[e_col] == 0, label_col] = - data.loc[data[e_col] == 0, label_col]
    return data[x_cols + [label_col]]
def get_date_today():
    """Return today's date as a YYYYMMDD string, e.g. '20201119'."""
    now = datetime.today()
    return now.strftime('%Y%m%d')
def check_model_in_dict(name, model_dict):
    """
    Check whether the new model, name, exists in all previously considered models,
    held in model_dict (keyed by qubit count).
    [previously in construct_models]
    Returns True when the alphabetised name is already present for the
    model's qubit count, False otherwise.
    """
    al_name = alph(name)
    n_qub = get_num_qubits(name)
    # Membership test directly yields the boolean the original branches
    # returned.  todo -- make clear if in legacy or running db
    return al_name in model_dict[n_qub]
def generate_api_key(request):
    """Handles AJAX requests for a new API key."""
    payload = json.dumps({'token': ApiUser.objects.get_unique_key()})
    return HttpResponse(payload, content_type="application/javascript")
def input_output_details(interpreter):
    """
    input_output_details:
        Return the (input_details, output_details) pair queried from the
        given interpreter.
    """
    return interpreter.get_input_details(), interpreter.get_output_details()
def charge_drone_battery(drone):
    """Handle the drone battery charging operation."""
    battery_level = float(drone["State"]["Battery"])
    if battery_level < 95:
        # Increase battery level
        drone["State"]["Battery"] = battery_level + 5
    else:
        # If battery >= 95 set battery level to 100%
        drone["State"]["Battery"] = 100
    drone_label = "Drone %s" % (str(drone["DroneID"]))
    dronelog = gen_DroneLog(drone_label,
                            "charging complete, returning to Active state")
    send_dronelog(dronelog)
    drone["State"]["Status"] = "Active"
    http_api_log = gen_HttpApiLog(drone_label, "PUT DroneLog", "Controller")
    send_http_api_log(http_api_log)
    return drone
def expirations(self, symbol, useDatetime=True, block: bool = True):
    """Gets list of available expiration dates for a symbol.

    Calls the 'market/options/expirations.json' endpoint to get list of all
    exp_dates available for some given equity.

    Args:
        symbol: Specify the stock symbol against which to query
        useDatetime: Specify whether to return datetime objects, or strings
        block: Specify whether to block thread if request exceeds rate limit

    Returns:
        List of dates (datetime obj, or string)

    Raises:
        RateLimitException: If block=False, rate limit problems will be raised

    Example:
        .. code-block:: python

            a.expirations('spy')
            # [ datetime.datetime(2022, 3, 18, 0, 0), ... ]
            a.expirations('spy', useDatetime = False)
            # [ '2022-03-18', ... ]
    """
    query = Expirations(
        auth=self.auth,
        account_nbr=self.account_nbr,
        block=block,
        symbol=symbol,
    )
    # Attach the formatting preference consumed by the request object.
    query.useDatetime = useDatetime
    return query.request()
def track(name, x, direction=None):
    """
    An identity function that registers hooks to
    track the value and gradient of the specified tensor.

    Here is an example of how to track an intermediate output ::

        input = ...
        conv1 = nnt.track('op', nnt.Conv2d(shape, 4, 3), 'all')
        conv2 = nnt.Conv2d(conv1.output_shape, 5, 3)
        intermediate = conv1(input)
        output = nnt.track('conv2_output', conv2(intermediate), 'all')
        loss = T.sum(output ** 2)
        loss.backward(retain_graph=True)
        d_inter = T.autograd.grad(loss, intermediate, retain_graph=True)
        d_out = T.autograd.grad(loss, output)
        tracked = nnt.eval_tracked_variables()
        testing.assert_allclose(tracked['conv2_output'], nnt.utils.to_numpy(output))
        testing.assert_allclose(np.stack(tracked['grad_conv2_output']), nnt.utils.to_numpy(d_out[0]))
        testing.assert_allclose(tracked['op'], nnt.utils.to_numpy(intermediate))
        for d_inter_, tracked_d_inter_ in zip(d_inter, tracked['grad_op_output']):
            testing.assert_allclose(tracked_d_inter_, nnt.utils.to_numpy(d_inter_))

    :param name:
        name of the tracked tensor.
    :param x:
        tensor or module to be tracked.
        If module, the output of the module will be tracked.
    :param direction:
        there are 4 options

        ``None``: tracks only value.

        ``'forward'``: tracks only value.

        ``'backward'``: tracks only gradient.

        ``'all'``: tracks both value and gradient.

        Default: ``None``.
    :return: `x`.
    """
    assert isinstance(name, str), 'name must be a string, got %s' % type(name)
    assert isinstance(x, (T.nn.Module, T.Tensor)), 'x must be a Torch Module or Tensor, got %s' % type(x)
    assert direction in (
        'forward', 'backward', 'all', None), 'direction must be None, \'forward\', \'backward\', or \'all\''
    if isinstance(x, T.nn.Module):
        # Module: hook the module so values/gradients of its *output* are
        # captured on every forward/backward pass.
        if direction in ('forward', 'all', None):
            def _forward_hook(module, input, output):
                # Detach so the stored value does not keep the graph alive.
                _TRACKS[name] = output.detach()

            hooks[name] = x.register_forward_hook(_forward_hook)
        if direction in ('backward', 'all'):
            def _backward_hook(module, grad_input, grad_output):
                _TRACKS['grad_' + name + '_output'] = tuple([grad_out.detach() for grad_out in grad_output])

            hooks['grad_' + name + '_output'] = x.register_backward_hook(_backward_hook)
    else:
        # Tensor: the value is captured eagerly; the gradient via a hook.
        if direction in ('forward', 'all', None):
            _TRACKS[name] = x.detach()

        if direction in ('backward', 'all'):
            def _hook(grad):
                # NOTE(review): Tensor.register_hook passes a single Tensor,
                # so iterating ``grad`` loops over its first dimension rather
                # than over multiple gradients (unlike the module branch,
                # which receives a tuple) — confirm this is intended.
                _TRACKS['grad_' + name] = tuple([grad_.detach() for grad_ in grad])

            hooks['grad_' + name] = x.register_hook(_hook)

    return x
def print_summary_title():
    """
    Prints the Summary title
    """
    divider = "-" * 40  # horizontal line
    print(f"\n")  # New line
    print(divider)
    print(f"SUMMARY")
    print(divider)
    return
def expected_l1_ls8_folder(
    l1_ls8_folder: Path,
    offset: Callable[[Path, str], str] = relative_offset,
    organisation="usgs.gov",
    collection="1",
    l1_collection="1",
    lineage=None,
):
    """
    Build the expected dataset document for a packaged Landsat-8 level-1 scene.

    :param l1_ls8_folder: Folder containing the level-1 scene files
    :param offset: Function producing a measurement path from (folder, filename)
    :param organisation: Producing organisation; its first domain label
        (e.g. ``usgs``) is used in the product name
    :param collection: The collection of the current scene
    :param l1_collection: The collection of the original landsat l1 scene
    :param lineage: Optional lineage mapping; defaults to an empty dict
    :return: The expected dataset document as a dict
    """
    org_code = organisation.split(".")[0]
    product_name = f"{org_code}_ls8c_level1_{collection}"
    # Collection-2 scenes were reprocessed: different processing time,
    # accuracy figures, software version, UUID and quality-band name.
    if collection == "2":
        processing_datetime = datetime(2020, 9, 7, 19, 30, 5)
        cloud_cover = 93.28
        points_model = 125
        points_version = 5
        rmse_model_x = 4.525
        rmse_model_y = 5.917
        software_version = "LPGS_15.3.1c"
        uuid = "d9221c40-24c3-5356-ab22-4dcac2bf2d70"
        quality_tag = "QA_PIXEL"
    else:
        processing_datetime = datetime(2017, 4, 5, 11, 17, 36)
        cloud_cover = 93.22
        points_model = 66
        points_version = 4
        rmse_model_x = 4.593
        rmse_model_y = 5.817
        software_version = "LPGS_2.7.0"
        uuid = "a780754e-a884-58a7-9ac0-df518a67f59d"
        quality_tag = "BQA"
    # The processing date also appears in every per-band filename below.
    processing_date = processing_datetime.strftime("%Y%m%d")
    return {
        "$schema": "https://schemas.opendatacube.org/dataset",
        "id": uuid,
        "label": f"{product_name}-0-{processing_date}_090084_2016-01-21",
        "product": {
            "name": product_name,
            "href": f"https://collections.dea.ga.gov.au/product/{product_name}",
        },
        "properties": {
            "datetime": datetime(2016, 1, 21, 23, 50, 23, 54435),
            # The minor version comes from the processing date (as used in filenames to distinguish reprocesses).
            "odc:dataset_version": f"{collection}.0.{processing_date}",
            "odc:file_format": "GeoTIFF",
            "odc:processing_datetime": processing_datetime,
            "odc:producer": organisation,
            "odc:product_family": "level1",
            "odc:region_code": "090084",
            "eo:cloud_cover": cloud_cover,
            "eo:gsd": 15.0,
            "eo:instrument": "OLI_TIRS",
            "eo:platform": "landsat-8",
            "eo:sun_azimuth": 74.007_443_8,
            "eo:sun_elevation": 55.486_483,
            "landsat:collection_category": "T1",
            "landsat:collection_number": int(l1_collection),
            "landsat:data_type": "L1TP",
            "landsat:geometric_rmse_model_x": rmse_model_x,
            "landsat:geometric_rmse_model_y": rmse_model_y,
            "landsat:ground_control_points_model": points_model,
            "landsat:ground_control_points_version": points_version,
            "landsat:landsat_product_id": f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1",
            "landsat:landsat_scene_id": "LC80900842016021LGN02",
            "landsat:processing_software_version": software_version,
            "landsat:station_id": "LGN",
            "landsat:wrs_path": 90,
            "landsat:wrs_row": 84,
        },
        "crs": "epsg:32655",
        # Valid-data footprint of the scene, in the CRS above.
        "geometry": {
            "coordinates": [
                [
                    [879307.5, -3776885.4340469087],
                    [879307.5, -3778240.713151076],
                    [839623.3108524992, -3938223.736900397],
                    [832105.7835592609, -3953107.5],
                    [831455.8296215904, -3953107.5],
                    [831453.7930575205, -3953115.0],
                    [819969.5411349908, -3953115.0],
                    [641985.0, -3906446.160824098],
                    [641985.0, -3889797.3351159613],
                    [685647.6920251067, -3717468.346156044],
                    [688909.3673333039, -3714585.0],
                    [708011.4230769231, -3714585.0],
                    [879315.0, -3761214.3020833335],
                    [879315.0, -3776857.8139976147],
                    [879307.5, -3776885.4340469087],
                ]
            ],
            "type": "Polygon",
        },
        # The panchromatic band has its own (finer) grid; all other bands
        # share the default one.
        "grids": {
            "default": {
                "shape": (60, 60),
                "transform": (
                    3955.5,
                    0.0,
                    641_985.0,
                    0.0,
                    -3975.500_000_000_000_5,
                    -3_714_585.0,
                    0.0,
                    0.0,
                    1.0,
                ),
            },
            "panchromatic": {
                "shape": (60, 60),
                "transform": (
                    3955.25,
                    0.0,
                    641_992.5,
                    0.0,
                    -3975.25,
                    -3_714_592.5,
                    0.0,
                    0.0,
                    1.0,
                ),
            },
        },
        # One measurement per Landsat-8 band (B1-B11) plus the quality band.
        "measurements": {
            "coastal_aerosol": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B1.TIF",
                )
            },
            "blue": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B2.TIF",
                )
            },
            "green": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B3.TIF",
                )
            },
            "red": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B4.TIF",
                )
            },
            "nir": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B5.TIF",
                )
            },
            "swir_1": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B6.TIF",
                )
            },
            "swir_2": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B7.TIF",
                )
            },
            "panchromatic": {
                "grid": "panchromatic",
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B8.TIF",
                ),
            },
            "cirrus": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B9.TIF",
                )
            },
            "lwir_1": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B10.TIF",
                )
            },
            "lwir_2": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B11.TIF",
                )
            },
            "quality": {
                "path": offset(
                    l1_ls8_folder,
                    f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_{quality_tag}.TIF",
                )
            },
        },
        "accessories": {
            "metadata:landsat_mtl": {
                "path": f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_MTL.txt"
            }
        },
        "lineage": lineage or {},
    }
def fetchPackageNames(graphJson):
    """Parses serialized graph and returns all package names it uses

    :param graphJson: Serialized graph
    :type graphJson: dict
    :rtype: set(str)
    """
    packages = set()

    def worker(graphData):
        for node in graphData["nodes"]:
            packages.add(node["package"])
            for inpJson in node["inputs"]:
                packages.add(inpJson['package'])
            # BUG FIX: the original iterated node["inputs"] a second time
            # here, so output pins' packages were never collected despite
            # the variable name; .get() tolerates nodes without outputs.
            for outJson in node.get("outputs", []):
                packages.add(outJson['package'])
            # Recurse into compound (sub-graph) nodes.
            if "graphData" in node:
                worker(node["graphData"])
    worker(graphJson)
    return packages
def get_nc_BGrid_POP(grdfile, name='POP_NEP',
                     xrange=(170,270), yrange=(240, 350)):
    """
    grd = get_nc_BGrid_POP(grdfile)

    Load a B-grid object for POP from a netCDF grid file.
    """
    dataset = pycnal.io.Dataset(grdfile)
    lon_t = dataset.variables['TLONG'][:]
    lat_t = dataset.variables['TLAT'][:]
    lon_u = dataset.variables['ULONG'][:]
    lat_u = dataset.variables['ULAT'][:]
    angle = dataset.variables['ANGLET'][:]
    h_t = dataset.variables['HT'][:]
    h_u = dataset.variables['HU'][:]
    z_t = dataset.variables['z_t'][:]
    z_w_top = dataset.variables['z_w_top'][:]
    z_w_bot = dataset.variables['z_w_bot'][:]
    # Interface depths: every top interface plus the deepest bottom one.
    z_w = np.zeros(z_t.size + 1)
    z_w[:-1] = z_w_top
    z_w[-1] = z_w_bot[-1]
    return BGrid_POP(lon_t, lat_t, lon_u, lat_u, angle, h_t, h_u, z_t, z_w,
                     name, xrange, yrange)
def types_and_shorthands():
    """a mapping from type names in the json doc to their
    one letter short hands in the output of 'attr'
    """
    shorthands = {
        'int': 'i',
        'uint': 'u',
        'bool': 'b',
        'decimal': 'd',
        'color': 'c',
        'string': 's',
        'regex': 'r',
        # enum-like names share the 'n' shorthand
        'SplitAlign': 'n',
        'LayoutAlgorithm': 'n',
        'font': 'f',
        'Rectangle': 'R',
        'WindowID': 'w',
    }
    return shorthands
def _make_event_from_message(message):
    """Turn a raw message from the wire into an event.Event object

    Returns None for messages that should be ignored (status updates we
    generate ourselves, or uninteresting notification types).
    """
    if 'oslo.message' in message:
        # Unpack the RPC call body and discard the envelope
        message = rpc_common.deserialize_msg(message)
    tenant_id = _get_tenant_id_for_message(message)
    crud = event.UPDATE
    router_id = None
    if message.get('method') == 'router_deleted':
        # Direct RPC call (not a notification).
        crud = event.DELETE
        router_id = message.get('args', {}).get('router_id')
    else:
        event_type = message.get('event_type', '')
        # Router id is not always present, but look for it as though
        # it is to avoid duplicating this line a few times.
        router_id = message.get('payload', {}).get('router', {}).get('id')
        if event_type.startswith('routerstatus.update'):
            # We generate these events ourself, so ignore them.
            return None
        if event_type == 'router.create.end':
            crud = event.CREATE
        elif event_type == 'router.delete.end':
            # Delete notifications carry the id at the payload top level.
            crud = event.DELETE
            router_id = message.get('payload', {}).get('router_id')
        elif event_type in _INTERFACE_NOTIFICATIONS:
            crud = event.UPDATE
            router_id = message.get(
                'payload', {}
            ).get('router.interface', {}).get('id')
        elif event_type in _INTERESTING_NOTIFICATIONS:
            crud = event.UPDATE
        elif event_type.endswith('.end'):
            # Any other completed operation is treated as a generic update.
            crud = event.UPDATE
        elif event_type.startswith('akanda.rug.command'):
            LOG.debug('received a command: %r', message.get('payload'))
            # If the message does not specify a tenant, send it to everyone
            pl = message.get('payload', {})
            tenant_id = pl.get('tenant_id', '*')
            router_id = pl.get('router_id')
            crud = event.COMMAND
            if pl.get('command') == commands.POLL:
                # POLL fans out to every tenant/router.
                return event.Event(
                    tenant_id='*',
                    router_id='*',
                    crud=event.POLL,
                    body={},
                )
        else:
            # LOG.debug('ignoring message %r', message)
            return None
    return event.Event(tenant_id, router_id, crud, message)
def to_region(obj):
    """Convert `obj` to instance of Region."""
    # None and existing Region instances pass through unchanged.
    if obj is None or isinstance(obj, Region):
        return obj
    return Region(*obj)
def create_textures():
    """ Create a list of images for sprites based on the global colors.
    !!! SHOULD be able to add custom images in here instead of the general colors."""
    return [
        arcade.Texture(str(color),
                       image=PIL.Image.new('RGB', (WIDTH, HEIGHT), color))
        for color in colors
    ]
def check_if_process_is_running(process_name):
    """
    Check if there is any running process that contains the given name process_name.
    """
    wanted = process_name.lower()
    # Iterate over all the running processes.
    for process in psutil.process_iter():
        try:
            if wanted in process.name().lower():
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process disappeared or is inaccessible; skip it.
            continue
    return False
def uncover_homework(filepath, cipher, fs=None):
    """Decrypt an encrypted homework file line by line.

    A homework is defined as two files, one with some lines changed according
    to the language defined by the library (see the docs) and another one with
    the solution. The solution file is encrypted so that the student's do not
    have it.

    Args:
        filepath (str): Filepath to the encrypted homework.
        fs (str): The filepath to store the discovered solution. If None (default),
            this function will create a file based on the provided argument
            for filepath.
        cipher (obj): An object with decrypt and encrypt methods, for instance
            cryptography.fernet.Fernet.
    """
    if fs is None:
        fs = make_derivate_filepath(filepath, tag='_uncovered')
    with open(filepath, 'rb') as f, open(fs, 'wb') as s:
        for line in f:
            try:
                line = cipher.decrypt(line)
            except cryptography.fernet.InvalidToken:
                # BUG FIX: the file is opened in binary mode, so lines are
                # bytes; the old ``line == ''`` str comparison was always
                # False and empty lines were re-raised instead of skipped.
                if not line:
                    pass
                else:
                    raise
            else:
                # Only successfully decrypted lines are written out.
                s.write(line)
def fair_d6(seed=None):
    """Uses a biased d6 to generate fair values between 1 and 6.

    Generator: yields fair die rolls forever, built from a randomly biased
    die via a von-Neumann-style rejection scheme.
    """
    # pick random weights for the faces, then normalize
    if seed:
        random.seed(seed)
    faces = [random.random() for x in range(6)]
    total = sum(faces)
    # BUG FIX: map() returns a lazy iterator in Python 3, which cannot be
    # sliced below; build the normalized list explicitly instead.
    faces = [x / total for x in faces]
    # Cumulative distribution for the six faces.
    faces = [sum(faces[:x]) for x in range(1, 7)]
    # BUG FIX: Python-2 print statement converted to a function call.
    print(faces)
    # Roll a biased d6. If we see a 1, 2, or 3, followed by a 4, 5, or 6, call
    # that a 0, and call a 4, 5, or 6 followed by a 1, 2, or 3 a 1. Ignore all
    # other results. This gives us a 0 or a 1 with equal probability.
    # NOTE(review): `r` is not defined in this function — presumably a
    # module-level random stream shared with earlier code; confirm.
    d6 = BiasedDie(faces, r)  # ok to re-use r, we're done with the stream now
    while True:
        s = '0b'  # yeah this is clowny
        while len(s) < 5:
            a = d6.roll()
            b = d6.roll()
            if a <= 3 and b >= 4:
                s += '0'
            elif a >= 4 and b <= 3:
                s += '1'
        result = int(s, 0)
        # Reject 0 and 7 so the remaining values 1..6 are uniform.
        if result > 0 and result < 7:
            yield result
def tweet_sunset():
    """ Sends a tweet about sunset """
    try:
        now = datetime.now()
        time_ran = now.strftime("%-I:%M %p")
        # Compare today's and tomorrow's day lengths to phrase the trend.
        _, sunset_time, length_today = _get_sunrise_sunset_times(now)
        _, _, length_tomorrow = _get_sunrise_sunset_times(now + timedelta(hours=24))
        # Convert the difference to a mins-and-secs string.
        time_text = _get_mins_and_secs_str_from_secs(abs(length_tomorrow - length_today))
        if not time_text:
            time_text = "a few milliseconds"
        if length_today < length_tomorrow:
            direction, increment = "increasing", "longer"
        else:
            direction, increment = "decreasing", "shorter"
        day_length_text = f"\n\nThe amount of daylight will be {direction} tomorrow as the daytime will be {increment} by {time_text}."
        tweet_text = f"The sun has now set at {sunset_time} in Grand Forks, ND!{day_length_text}".strip()
        tweet(tweet_text, enable_tweet=True)
    except Exception as e:
        print(f"Failed Run: {time_ran}\n" + str(e))
def exec_main_with_profiler(options: "optparse.Values") -> int:
    """Run exec_main under cProfile and print the 40 hottest functions."""
    import cProfile
    import pstats
    import io
    from pstats import SortKey  # type: ignore
    profiler = cProfile.Profile()
    profiler.enable()
    ret = exec_main(options)
    profiler.disable()
    report = io.StringIO()
    stats = pstats.Stats(profiler, stream=report).sort_stats(SortKey.TIME)
    stats.print_stats(40)
    print(report.getvalue())
    return ret
def put_text(image, text, point, scale, color, thickness):
    """Draws text in image.

    # Arguments
        image: Numpy array.
        text: String. Text to be drawn.
        point: Tuple of coordinates indicating the top corner of the text.
        scale: Float. Scale of text.
        color: Tuple of integers. RGB color coordinates.
        thickness: Integer. Thickness of the lines used for drawing text.

    # Returns
        Numpy array with shape ``[H, W, 3]``. Image with text.
    """
    # cv2.putText returns an image in contrast to other drawing cv2 functions.
    rendered = cv2.putText(image, text, point, FONT, scale, color, thickness, LINE)
    return rendered
def test_url_join():
    """
    Some basic URL joining tests.
    """
    url = URL("https://example.org:123/path/to/somewhere")
    expectations = {
        '/somewhere-else': "https://example.org:123/somewhere-else",
        'somewhere-else': "https://example.org:123/path/to/somewhere-else",
        '../somewhere-else': "https://example.org:123/path/somewhere-else",
        '../../somewhere-else': "https://example.org:123/somewhere-else",
    }
    for relative, absolute in expectations.items():
        assert url.join(relative) == absolute
def squash_dimensions(
    dimensions: List[Dimension], check_path_changes=True
) -> Dimension:
    """Squash a list of nested Dimensions into a single one.

    Args:
        dimensions: The Dimensions to squash, from slowest to fastest moving
        check_path_changes: If True then check that nesting the output
            Dimension within other Dimensions will provide the same path
            as nesting the input Dimension within other Dimensions

    See Also:
        `why-squash-can-change-path`

    >>> dimx = Dimension({"x": np.array([1, 2])}, snake=True)
    >>> dimy = Dimension({"y": np.array([3, 4])})
    >>> squash_dimensions([dimy, dimx]).midpoints
    {'y': array([3, 3, 4, 4]), 'x': array([1, 2, 2, 1])}
    """
    path = Path(dimensions)
    # Consuming a Path of these dimensions performs the squash
    # TODO: dim.tile might give better performance but is much longer
    squashed = path.consume()
    # Check that the squash is the same as the original
    if dimensions and dimensions[0].snake:
        squashed.snake = True
        # The top level is snaking, so this dimension will run backwards
        # This means any non-snaking axes will run backwards, which is
        # surprising, so don't allow it
        if check_path_changes:
            non_snaking = [k for d in dimensions for k in d.axes() if not d.snake]
            if non_snaking:
                raise ValueError(
                    f"Cannot squash non-snaking Specs in a snaking Dimension "
                    f"otherwise {non_snaking} would run backwards"
                )
    elif check_path_changes:
        # The top level is not snaking, so make sure there is an even
        # number of iterations of any snaking axis within it so it
        # doesn't jump when this dimension is iterated a second time
        for i, dim in enumerate(dimensions):
            # A snaking dimension within a non-snaking top level must repeat
            # an even number of times.
            # BUG FIX: np.product is a deprecated alias removed in NumPy 2.0;
            # np.prod is the supported spelling with identical behaviour.
            if dim.snake and np.prod(path._lengths[:i]) % 2:
                raise ValueError(
                    f"Cannot squash snaking Specs in a non-snaking Dimension "
                    f"when they do not repeat an even number of times "
                    f"otherwise {dim.axes()} would jump in position"
                )
    return squashed
def do_delete(cs, args):
    """Delete specified instance(s)."""
    def _delete_one(server):
        # Resolve the name/id to an instance, then delete it.
        cs.instances.delete(_find_instance(cs, server))

    utils.do_action_on_many(
        _delete_one,
        args.instance,
        _("Request to delete instance %s has been accepted."),
        _("Unable to delete the specified instance(s)."))
def CreateRootRelativePath(self, path):
    """
    Generate a path relative from the root
    """
    node = self.engine_node.make_node(path)
    return node.abspath()
def test_packages(host, name, codenames):
    """
    Test installed packages
    """
    info = host.system_info
    # Only Debian-family hosts are managed by this role.
    if info.distribution not in ('debian', 'ubuntu'):
        pytest.skip('{} ({}) distribution not managed'.format(
            info.distribution, info.release))
    # Some packages only apply to specific release codenames.
    if codenames and info.codename.lower() not in codenames:
        pytest.skip('{} package not used with {} ({})'.format(
            name, info.distribution, info.codename))
    assert host.package(name).is_installed
def commit():
    """ copy built docs to gh-pages and commit"""
    # Destination: this project's directory inside the local gh-pages clone;
    # source: the freshly built Sphinx HTML output.
    dest = join(PATH['gh-pages'], GIT_NAME)
    src = join(PATH['root'], 'build/html')
    os.chdir(dest)
    # Wipe the previous build so pages removed from the docs do not linger.
    os.system('rm -rf *')
    print('Copying docs to:',dest)
    # NOTE(review): this assumes `copytree` tolerates an existing destination
    # (e.g. distutils.dir_util.copy_tree); shutil.copytree would raise here
    # because `dest` already exists -- confirm which helper is imported.
    copytree(src,dest)
    os.system("git add *")
    os.system("git commit -am 'autopublish'")
    os.system("git push -u origin gh-pages")
    # Return to the repository root so later steps run from a known cwd.
    os.chdir(PATH['root']) | 5,335,170 |
async def test_user_flow_cannot_connect(hass):
    """Test that config flow handles connection errors."""
    # Force the GTI client's init() to raise so the flow exercises its
    # connection-error branch instead of contacting the real API.
    with patch(
        "homeassistant.components.hvv_departures.hub.GTI.init",
        side_effect=CannotConnect(),
    ):
        # step: user
        result_user = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
            data={
                CONF_HOST: "api-test.geofox.de",
                CONF_USERNAME: "test-username",
                CONF_PASSWORD: "test-password",
            },
        )
        # The flow should re-show the form with a "cannot_connect" error
        # rather than aborting or creating a config entry.
        assert result_user["type"] == "form"
        assert result_user["errors"] == {"base": "cannot_connect"} | 5,335,171 |
def test_node():
    """Test creation of node."""
    created = Node(1, 9000)
    expected_repr = str({"name": "nodo1", "port": 9000})
    # Node index 1 should be named "nodo1" and expose port 9000.
    assert created.name == "nodo1"
    assert str(created) == expected_repr
def resolve_font(name):
    """Sloppy way to turn font names into absolute filenames

    This isn't intended to be a proper font lookup tool but rather a
    dirty tool to not have to specify the absolute filename every
    time.

    For example::

        >>> path = resolve_font('IndUni-H-Bold')
        >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts')
        >>> indunih_path = os.path.join(fontdir, 'IndUni-H-Bold.ttf')
        >>> assert path == indunih_path

    This isn't case-sensitive::

        >>> assert resolve_font('induni-h') == indunih_path

    Raises :exc:`FontNotFound` on failure::

        >>> resolve_font('blahahaha')
        Traceback (most recent call last):
        ...
        FontNotFound: Can't find 'blahahaha' :'( Try adding it to ~/.fonts
    """
    # Case-insensitive substring match against every known font file.
    for fontdir, fontfiles in get_font_files():
        for fontfile in fontfiles:
            if name.lower() in fontfile.lower():
                return os.path.join(fontdir, fontfile)
    # Bug fix: interpolate the requested name into the message (the doctest
    # above expects it); previously the bare format string with a dangling
    # %r placeholder was raised.
    raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name)
def to_shape(shape_ser):
    """ Deserializes a shape into a Shapely object - can handle WKT, GeoJSON,
    Python dictionaries and Shapely types.

    :param shape_ser: a WKT string, a GeoJSON string, a GeoJSON-like
        mapping, or an already-constructed Shapely geometry.
    :return: the corresponding Shapely geometry object.
    :raises TypeError: if the input cannot be interpreted as a shape.
    """
    if isinstance(shape_ser, str):
        # Strings: try WKT first, then fall back to parsing as GeoJSON.
        try:
            # Redirecting stdout because there's a low level exception that
            # prints.
            # NOTE(review): redirect_stderr is given a *path* here, so it is
            # presumably a project helper rather than
            # contextlib.redirect_stderr (which expects a file object) --
            # confirm against this module's imports.
            with redirect_stderr("/dev/null"):
                shape_obj = wkt_loads(shape_ser)
        except WKTReadingError:
            try:
                shape_obj = shape(json.loads(shape_ser))
            except Exception:
                raise TypeError(
                    "{} is not serializable to a shape.".format(str(shape_ser))
                )
    elif isinstance(shape_ser, Mapping):
        # GeoJSON-like dict, e.g. {"type": "Point", "coordinates": [...]}.
        shape_obj = shape(shape_ser)
    elif isinstance(
        shape_ser,
        (
            Point,
            MultiPoint,
            LineString,
            MultiLineString,
            Polygon,
            MultiPolygon,
        ),
    ):
        # Already a Shapely geometry: pass it through unchanged.
        shape_obj = shape_ser
    else:
        raise TypeError(
            "{} is not serializable to a shape.".format(str(shape_ser))
        )
    return shape_obj | 5,335,174 |
def _from_module(module, object):
"""
Return true if the given object is defined in the given module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function") | 5,335,175 |
def _escape_pgpass(txt):
"""
Escape a fragment of a PostgreSQL .pgpass file.
"""
return txt.replace('\\', '\\\\').replace(':', '\\:') | 5,335,176 |
def harvest_dirs(path):
    """Return a list of versioned directories under working copy
    directory PATH, inclusive."""
    # 'svn status' output line matcher, taken from the Subversion test suite.
    # Raw strings avoid Python 3 invalid-escape warnings for \d etc.
    rm = re.compile(r'^([!MACDRUG_ ][MACDRUG_ ])([L ])([+ ])([S ])([KOBT ]) '
                    r'([* ]) [^0-9-]*(\d+|-|\?) +(\d|-|\?)+ +(\S+) +(.+)')
    dirs = []
    fp = os.popen('%s status --verbose %s' % (SVN_BINARY, path))
    try:
        # Iterate the pipe directly instead of a manual readline loop.
        for line in fp:
            line = line.rstrip()
            # Everything after the "Performing status on ..." banner is
            # external-item noise; stop there.
            if line.startswith('Performing'):
                break
            match = rm.search(line)
            if match:
                # Group 10 is the working-copy path column.
                stpath = match.group(10)
                try:
                    if os.path.isdir(stpath):
                        dirs.append(stpath)
                except (OSError, ValueError):
                    # Unreadable or malformed paths are simply skipped
                    # (previously a bare except with the same effect).
                    pass
    finally:
        # Close the pipe even on early break/exception (previously leaked).
        fp.close()
    return dirs
def gen_index(doc_term_map: Dict[PT.Word, Set[PT.IndexNum]],
              dependency_map: Dict[PT.IndexNum, Count],
              i: PT.IndexNum,
              words: List[PT.Word]
              ) -> PT.PkgIndex:
    """Generate package index by scoring each word / term.

    Each distinct word in ``words`` is scored as
    log(term_freq * inverse_doc_freq * dependency_freq).

    :param doc_term_map: word -> set of documents containing that word.
    :param dependency_map: package index -> dependency count.
    :param i: index of the package being scored.
    :param words: the package's tokenized words (with repetitions).
    :return: mapping of word -> score for this package.
    """
    word_freq: Dict[PT.Word, Count] = utils.count_freq(words)
    total_docs = len(doc_term_map)
    # Perf fix: the dependency factor depends only on the package (i), not
    # on the word, so compute it once instead of once per distinct word.
    dependency_freq = get_dependency_freq(i, dependency_map)
    pkg_index: PT.PkgIndex = dict()
    for word in word_freq:
        doc_inverse_freq = get_doc_inverse_freq(total_docs,
                                                len(doc_term_map[word]))
        pkg_index[word] = math.log(word_freq[word] *
                                   doc_inverse_freq *
                                   dependency_freq)
    return pkg_index
def arr_to_dict(arr, ref_dict):
    """
    Transform an array of data into a dictionary keyed by the same keys in
    ref_dict, with data divided into chunks of the same length as in ref_dict.

    Requires that the length of the array is the sum of the lengths of the
    arrays in each entry of ref_dict.  The other dimensions of the input
    array and reference dict can differ.

    Arguments
    ---------
    arr : array
        Input array to be transformed into dictionary.
    ref_dict : dict
        Reference dictionary containing the keys used to construct the output
        dictionary.

    Returns
    -------
    out : dict
        Dictionary of values from arr keyed with keys from ref_dict.

    Raises
    ------
    ValueError
        If ``len(arr)`` does not match the combined length of the values in
        ``ref_dict``.  (Previously enforced with a bare ``assert``, which is
        silently stripped when Python runs with ``-O``.)
    """
    expected = sum(len(v) for v in ref_dict.values())
    if len(arr) != expected:
        raise ValueError(
            "len(arr)=%d does not match total reference length %d"
            % (len(arr), expected)
        )
    out = OrderedDict()
    idx = 0
    # Slice consecutive chunks of arr, one per reference entry, preserving
    # the reference dict's key order.
    for k, bd in ref_dict.items():
        out[k] = arr[idx : idx + len(bd)]
        idx += len(bd)
    return out
def test_makecpt_truncated_zlow_zhigh(position):
    """
    Use static color palette table that is truncated to z-low and z-high.
    """
    fig = Figure()
    # Keep only the central 70% of the rainbow CPT, stretched over 0-1000.
    makecpt(cmap="rainbow", truncate=[0.15, 0.85], series=[0, 1000])
    fig.colorbar(cmap=True, frame=True, position=position)
    # Returned so the image-comparison decorator can check the rendering.
    return fig | 5,335,180 |
def extract_boar_teloFISH_as_list(path):
    """
    FUNCTION FOR PULLING KELLY'S TELOFISH DATA FOR 40 BOARS into a LIST.. TO BE MADE INTO A DATAFRAME & JOINED W/
    MAIN DATAFRAME if possible
    These excel files take forever to load.. the objective here is to synthesize all the excel files for
    telomere FISH data into one dataframe, then save that dataframe to csv file to be retrieved later
    loading one whole csv file containing all the data will be much, much faster than loading the parts of the whole
    Along the way, we'll normalize the teloFISH data using controls internal to each excel file
    """
    boar_teloFISH_list = []
    for file in os.scandir(path):
        if 'Hyb' in file.name:
            print(f'Handling {file.name}...')
            # NOTE(review): built by plain concatenation, so `path` is
            # assumed to end with a separator -- confirm with callers.
            full_name = path + file.name
            # making a dict of excel sheets, where KEY:VALUE pairs are SAMPLE ID:TELO DATA
            telo_excel_dict = pd.read_excel(full_name, sheet_name=None, skiprows=4, usecols=[3], nrows=5000)
            if 'Telomere Template' in telo_excel_dict.keys():
                del telo_excel_dict['Telomere Template']
            excel_file_list = []
            for sample_id, telos in telo_excel_dict.items():
                telos_cleaned = clean_individ_telos(telos)
                if sample_id != 'Control':
                    excel_file_list.append([sample_id, telos_cleaned.values, np.mean(telos_cleaned)])
                elif sample_id == 'Control':
                    control_value = np.mean(telos_cleaned)
            # NOTE(review): if a workbook has no 'Control' sheet, the loop
            # below raises NameError (first file) or silently reuses the
            # previous file's control value -- confirm every Hyb file has one.
            #normalize teloFISH values by control value
            for sample in excel_file_list:
                sample_data = sample
                #normalize individual telos
                sample_data[1] = np.divide(sample_data[1], control_value)
                #normalize telo means
                sample_data[2] = np.divide(sample_data[2], control_value)
                boar_teloFISH_list.append(sample_data)
    print('Finished collecting boar teloFISH data')
    return boar_teloFISH_list | 5,335,181 |
def captured_stdout():
    """Return a context manager that captures sys.stdout.

       with captured_stdout() as stdout:
           print("hello")
       # stdout.getvalue() == "hello\n"
    """
    stream_name = "stdout"
    return captured_output(stream_name)
def login():
    """
    Handles user authentication.
    The hash of the password the user entered is compared to the hash in the database.
    Also saves the user_id in the user's session.
    """
    form = SignInForm()
    banned = None
    reason = None
    if form.validate_on_submit():
        user_id = form.user_id.data
        password = form.password.data
        db = get_db()
        # Parameterized query: user_id comes straight from the form.
        user = db.execute("""SELECT * FROM users
                            where user_id = ?;""", (user_id,)).fetchone()
        if user is None:
            # NOTE(review): "Unkown" is misspelled in this user-facing
            # message (runtime string, left unchanged here).
            form.user_id.errors.append("Unkown user id")
        elif not check_password_hash(user["password"], password):
            form.password.errors.append("Incorrect password!")
        elif user["isBanned"] == 1:
            # Banned users see the ban notice instead of being logged in.
            banned = "You have been banned"
            reason = user["bannedReason"]
        else:
            # Successful login: reset the session, then honor an optional
            # ?next= redirect target, defaulting to the chat page.
            session.clear()
            session["user_id"] = user_id
            next_page = request.args.get("next")
            if not next_page:
                next_page = url_for("chat")
            return redirect(next_page)
    # Fall through on GET, validation failure, or ban: re-render the form.
    return render_template("login.html", form=form, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor, banned=banned, reason=reason) | 5,335,183 |
def create_ipu_strategy(num_ipus,
                        fp_exceptions=False,
                        enable_recomputation=True,
                        min_remote_tensor_size=50000,
                        max_cross_replica_sum_buffer_size=10*1024*1024):
    """
    Creates an IPU config and returns an IPU strategy ready to run
    something on IPUs
    :param num_ipus: Int representing the number of IPUs required.
    :param fp_exceptions: Bool, if True floating point exceptions will
        be raised.
    :param enable_recomputation: Bool, if True recomputation will be
        enabled.
    :param min_remote_tensor_size: The minimum size (in bytes) a tensor
        must be in order to be considered for being stored in remote
        memory.
    :param max_cross_replica_sum_buffer_size: The maximum number of bytes
        that can be waiting before a cross replica sum op is scheduled.
        Represents an always-live vs not-always-live trade off. The
        default used here is effective for BERT.
    :return: An IPU strategy
    """
    ipu_config = ipu.config.IPUConfig()
    ipu_config.auto_select_ipus = num_ipus
    ipu_config.allow_recompute = enable_recomputation
    # Apply the same trap policy to all four FP exception classes:
    # invalid op, divide-by-zero, overflow, and NaN-on-overflow.
    ipu_config.floating_point_behaviour.inv = fp_exceptions
    ipu_config.floating_point_behaviour.div0 = fp_exceptions
    ipu_config.floating_point_behaviour.oflo = fp_exceptions
    ipu_config.floating_point_behaviour.nanoo = fp_exceptions
    ipu_config.optimizations.minimum_remote_tensor_size = min_remote_tensor_size
    ipu_config.optimizations.merge_infeed_io_copies = True
    ipu_config.optimizations.maximum_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size
    # Attach to devices lazily and allow remote buffers for off-chip storage.
    ipu_config.device_connection.type = ipu.config.DeviceConnectionType.ON_DEMAND
    ipu_config.device_connection.enable_remote_buffers = True
    ipu_config.configure_ipu_system()
    strategy = ipu.ipu_strategy.IPUStrategy()
    return strategy | 5,335,184 |
def run_pipeline(context, func, ast, func_signature,
                 pipeline=None, **kwargs):
    """Run the configured AST transformers and visitors over *ast*.

    A fresh pipeline is built from *context* when none (or a falsy one)
    is supplied.  Returns the pipeline together with its result.
    """
    if not pipeline:
        pipeline = context.numba_pipeline(context, func, ast,
                                          func_signature, **kwargs)
    return pipeline, pipeline.run_pipeline()
def convert_acc_data_to_g(
    data: Union[AccDataFrame, ImuDataFrame], inplace: Optional[bool] = False
) -> Optional[Union[AccDataFrame, ImuDataFrame]]:
    """Convert acceleration columns from :math:`m/s^2` to g.

    Parameters
    ----------
    data : :class:`~biopsykit.utils.datatype_helper.AccDataFrame` or \
        :class:`~biopsykit.utils.datatype_helper.ImuDataFrame`
        dataframe containing acceleration data.
    inplace : bool, optional
        whether to perform the operation inplace or not. Default: ``False``

    Returns
    -------
    :class:`~biopsykit.utils.datatype_helper.AccDataFrame` or \
    :class:`~biopsykit.utils.datatype_helper.ImuDataFrame`
        acceleration data converted to g, or ``None`` when ``inplace``
    """
    target = data if inplace else data.copy()
    # Only columns whose name contains "acc" hold acceleration values.
    acc_cols = target.filter(like="acc").columns
    # 1 g == 9.81 m/s^2.
    target.loc[:, acc_cols] = target.loc[:, acc_cols] / 9.81
    return None if inplace else target
def replace_by_one_rule(specific_rule: dict, sentence: str):
    """Rewrite *sentence* according to one replacement rule set.

    Each key of ``specific_rule`` is the canonical term; every word listed
    under it is substituted by that key (matching is done on the lowercased
    sentence).  A handful of fixed phrase rewrites are then applied.

    :param specific_rule: mapping of canonical word -> list of words to
        replace with it.
    :param sentence: the sentence to rewrite.
    :return: the rewritten sentence (lowercased).
    """
    text = sentence.lower()
    # Substitute every synonym with its canonical key.
    for canonical, synonyms in specific_rule.items():
        for synonym in synonyms:
            text = text.replace(synonym, canonical)
    # Normalize the bare verb 'be' to 'is' on a per-token basis.
    text = ' '.join('is' if token == 'be' else token
                    for token in text.split(' '))
    # Fixed phrase/plural clean-ups, applied in this exact order.
    for old, new in (
        ('(s)', ''),
        ('is at there', 'been there'),
        ('(es)', ''),
        ('is in there', 'been there'),
        ('is there', 'been there'),
        ('possess', 'have'),
    ):
        text = text.replace(old, new)
    return text
def get_os(platform):
    """
    Map a relay's platform string onto an icon name.
    @type platform: C{string}
    @param platform: A string that represents the platform of the
        relay (may be empty or None).
    @rtype: C{string}
    @return: The matching icon name, 'WindowsServer' for Windows Server
        platforms, or 'NotAvailable' when nothing matches.
    """
    if not platform:
        return 'NotAvailable'
    # First entry of __OS_LIST found in the platform string wins.
    for candidate in __OS_LIST:
        if candidate in platform:
            # Windows Server gets its own dedicated icon.
            if candidate == 'Windows' and 'Server' in platform:
                return 'WindowsServer'
            return candidate
    return 'NotAvailable'
def get_loglikelihood_fn(dd_s, f_l=f_l, f_h=f_h, n_f=n_f):
    """
    Build a log-likelihood function for a dark-dress signal.

    :param dd_s: signal system (the observed DynamicDress)
    :param f_l: lower edge of the frequency grid
    :param f_h: upper edge of the frequency grid
    :param n_f: number of frequency samples
    :return: callable mapping a parameter point x
        (gamma_s, rho_6T, M_chirp in solar masses, log10 q)
        to its log-likelihood
    """
    # Frequency grid and FFT padding are computed once and closed over, so
    # each likelihood evaluation only has to rebuild the template system.
    fs = jnp.linspace(f_l, f_h, n_f)
    pad_low, pad_high = get_match_pads(fs)
    def _ll(x):
        # Unpack parameters into dark dress ones
        gamma_s, rho_6T, M_chirp_MSUN, log10_q = x
        M_chirp = M_chirp_MSUN * MSUN
        q = 10 ** log10_q
        rho_6 = rho_6T_to_rho6(rho_6T)
        f_c = get_f_isco(get_m_1(M_chirp, q))
        # Template shares phase, coalescence time and distance with dd_s.
        dd_h = DynamicDress(
            gamma_s, rho_6, M_chirp, q, dd_s.Phi_c, dd_s.tT_c, dd_s.dL, f_c
        )
        return loglikelihood_fft(dd_h, dd_s, fs, pad_low, pad_high)
    return _ll | 5,335,189 |
def to_module_name(field):
    """Convert a binary path to the bracketed module-name syntax used by
    https://github.com/brendangregg/FlameGraph.

    Examples:
        '[unknown]'        -> '[unknown]'
        '/usr/bin/firefox' -> '[firefox]'

    :param field: module path, or the literal '[unknown]'.
    :return: '[basename]' of the path, or '[unknown]' unchanged.
    """
    # Doc fix: the old docstring advertised a method signature
    # (_to_module_name(self, field)) that did not match this function.
    if field != '[unknown]':
        field = '[{}]'.format(field.split('/')[-1])
    return field
def ApplyMomentum(variable, accumulation, learning_rate, gradient, momentum, use_nesterov=False, gradient_scale=1.0):
    """apply momentum

    Thin wrapper around ``apply_momentum.apply_momentum``.  Note the
    argument order differs: this wrapper takes
    (variable, accumulation, learning_rate, gradient, ...) while the
    underlying op takes (variable, gradient, accumulation, learning_rate,
    ...), and ``gradient_scale`` is forwarded as ``grad_scale``.
    """
    return apply_momentum.apply_momentum(variable, gradient, accumulation, learning_rate,
                                         momentum, use_nesterov=use_nesterov, grad_scale=gradient_scale) | 5,335,191 |
def init():
    """Return True if the plugin has loaded successfully."""
    # Register this plugin with Leo; loading always succeeds.
    g.plugin_signon(__name__)
    return True
def _fftconvolve_14(in1, in2, int2_fft, mode="same"):
    """
    scipy routine scipy.signal.fftconvolve with kernel already fourier transformed

    :param in1: input array.
    :param in2: kernel array; only its shape is used here, since its
        transform is supplied pre-computed as ``int2_fft``.
    :param int2_fft: FFT of ``in2``.  NOTE(review): assumed to have been
        produced by ``rfftn(in2, fshape)`` with the same padded shape this
        function computes -- confirm with the caller.
    :param mode: 'full', 'same' or 'valid', with the same semantics as
        scipy.signal.fftconvolve.
    """
    in1 = signaltools.asarray(in1)
    in2 = signaltools.asarray(in2)
    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return signaltools.array([])
    s1 = signaltools.array(in1.shape)
    s2 = signaltools.array(in2.shape)
    # Full linear-convolution output shape.
    shape = s1 + s2 - 1
    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [signaltools._next_regular(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])
    # Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
    # sure we only call rfftn/irfftn from one thread at a time.
    ret = signaltools.irfftn(signaltools.rfftn(in1, fshape) *
                             int2_fft, fshape)[fslice].copy()
    #np.fft.rfftn(in2, fshape)
    if mode == "full":
        return ret
    elif mode == "same":
        return signaltools._centered(ret, s1)
    elif mode == "valid":
        return signaltools._centered(ret, s1 - s2 + 1)
    else:
        raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") | 5,335,193 |
def solve_step(previous_solution_space, phase_space_position, step_num):
    """
    Solve the differential equation for one (cosine, energy) slice at one
    radial step.

    :param previous_solution_space: solution to the previous step across all
        angles and energies; the first two columns hold the phase-space
        values (ndarray)
    :param phase_space_position: row index selecting the cosine / neutrino
        energy slice to solve (int)
    :param step_num: step number in the discretized radial distance away
        from the initial core (int)
    :return: (cosine, energy, solution) tuple for the selected slice
    """
    row = previous_solution_space[phase_space_position]
    euler_solution = solve_fixed_energy_angle(
        previous_solution_space=previous_solution_space,
        phase_space_position=phase_space_position,
        step_num=step_num,
    )
    # Echo the slice's phase-space coordinates alongside its solution.
    return row[0], row[1], euler_solution
def gradientDescentMulti(X, y, theta, alpha, num_iters):
    """
    Run ``num_iters`` iterations of batch gradient descent on (X, y),
    updating ``theta`` in place with learning rate ``alpha``.

    Returns the final theta and the history of the cost function, one
    entry per iteration.
    """
    J_history = []
    m = y.size  # number of training examples
    for _ in range(num_iters):
        residual = X.dot(theta) - y
        # In-place update keeps the caller's theta array object.
        theta -= alpha * X.T.dot(residual) / float(m)
        # Record the cost after this step.
        J_history.append(computeCostMulti(X, y, theta))
    return theta, J_history
def execute(*args, **kw):
    """Wrapper for ``Cursor#execute()``.

    Opens a fresh cursor on the module's "default" connection and forwards
    all positional and keyword arguments to its ``execute`` method.
    """
    return _m.connection["default"].cursor().execute(*args, **kw) | 5,335,196 |
def redeem_with_retry(client, data, headers):
    """
    Attempt a redemption. Retry if it fails.

    Twisted inlineCallbacks-style generator: it yields Deferreds and exits
    via ``returnValue``.  Retries forever until a redemption succeeds.

    :return: A ``Deferred`` that fires with (duration of successful request,
        number of failed requests).
    """
    errors = 0
    while True:
        before = time()
        response = yield client.post(
            url="http://127.0.0.1:8080/v1/redeem",
            data=data,
            headers=headers,
        )
        after = time()
        # Duration of this attempt in whole milliseconds.
        duration = int((after - before) * 1000)
        body = yield readBody(response)
        if response.code == 200:
            print("Request complete in {}ms".format(duration))
            returnValue((duration, errors))
        errors += 1
        # Prefer the server's structured "reason"; fall back to raw body.
        # NOTE(review): a JSON body without a "reason" key would raise
        # KeyError here (only ValueError is caught) -- confirm intended.
        try:
            reason = loads(body)["reason"]
        except ValueError:
            reason = body
        print("Request failed: {} {}".format(response.code, reason)) | 5,335,197 |
def get_login_client():
    """
    Build a LinodeLoginClient from the credentials declared in this
    example project's config module.
    """
    client_id = config.client_id
    client_secret = config.client_secret
    return LinodeLoginClient(client_id, client_secret)
def ensure_path_is_valid(pth):
    """Validate that *pth* names a writable, not-yet-existing file path.

    Raises FilesystemError when the operand is not a path type, when
    something already exists at the path, or when its parent directory
    is missing.
    """
    if isnotpath(pth):
        raise FilesystemError(f"Operand must be a path type: {pth}")
    if os.path.exists(pth):
        # Distinguish an existing directory from an existing file.
        message = (
            f"Can’t save over directory: {pth}"
            if os.path.isdir(pth)
            else f"Output file exists: {pth}"
        )
        raise FilesystemError(message)
    parent = os.path.dirname(pth)
    if not os.path.isdir(parent):
        raise FilesystemError(f"Directory doesn’t exist: {parent}")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.