content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from functools import reduce
from operator import add
def sum_node_list(node_list):
    """Custom sum func to avoid creating redundant nodes in Python sum func.

    ``None`` entries are dropped; an all-``None``/empty list yields ``None``.
    """
    present = [node for node in node_list if node is not None]
    if not present:
        return None
    return reduce(add, present)
|
4daffb4c70cc599daa4e656686a969b58dd16ef0
| 701,072
|
def axis_lw():
    """Return the line width used to draw the axes."""
    return 0.6
|
5e5c3d37dae10ef83b89b0493e897d769119fdea
| 701,074
|
def squares_in_rectangle(length, width):
    """Solution to the Codewars "Rectangle into Squares" kata:
    https://www.codewars.com/kata/55466989aeecab5aac00003e/train/python

    Repeatedly cuts the largest possible square off the rectangle and
    records its side length. A square input returns None.
    """
    if length == width:
        return None
    sides = []
    while length > 0 and width > 0:
        if width < length:
            sides.append(width)
            length -= width
        else:
            sides.append(length)
            width -= length
    return sides
|
e0e32e2803ca752a24adf4cce353bf6cc49d4c08
| 701,075
|
def keep_node_permissive(data):
    """Node predicate that accepts every node.

    Applying :code:`filter(keep_node_permissive, graph)` to a BEL graph
    yields the same iterable as iterating over the :class:`BELGraph`
    directly.

    :param dict data: A PyBEL data dictionary
    :return: Always returns :data:`True`
    :rtype: bool
    """
    return True
|
425ebf20686ea309e542b82b2fbbe7aa81cccb63
| 701,076
|
def probability_to_internal(external_values, constr):
    """Reparametrize probability constrained parameters to internal.

    Divides the external values elementwise by their last entry;
    ``constr`` is accepted for interface compatibility but unused.
    """
    last_entry = external_values[-1]
    return external_values / last_entry
|
0aaf19d5c2fd56e8e4277729a2a14cf5828bbf47
| 701,077
|
import platform
def get_os():
    """Get system name.

    Returns:
        str: The lower-cased system/OS name, e.g. ``linux`` or ``windows``.
    """
    system_name = platform.system()
    return system_name.lower()
|
5dd7c30ce53422180e9c75d4aff7912c887cb3e4
| 701,078
|
import torch
def get_params(model):
    """Aggregates model parameters of all linear layers into a vector.

    Args:
        model: A (pre-trained) torch.nn.Module or torch.nn.Sequential model.

    Returns:
        A 1-D tensor holding all Linear weights (flattened, in module
        order) followed by all Linear biases.
    """
    flat_weights = []
    flat_biases = []
    for layer in model.modules():
        # Match by class name, as the original did.
        if type(layer).__name__ == 'Linear':
            flat_weights.append(layer.weight.contiguous().view(-1))
            flat_biases.append(layer.bias)
    return torch.cat(flat_weights + flat_biases)
|
c6a66f3a013f62e79e51f47d130e65b12b38ff33
| 701,079
|
def _lst_for_pes(pes_dct, run_pes_idxs):
""" Get a dictionary of requested species matching the PES_DCT format
"""
red_pes_dct = {}
for (form, pidx, sidx), chnls in pes_dct.items():
# Grab PES if idx in run_pes_idx dct
run_chnl_idxs = run_pes_idxs.get(pidx, None)
if run_chnl_idxs is not None:
# Grab the channels if they are in run_chnl_idxs
red_chnls = ()
for chnl in chnls:
cidx, _ = chnl
if cidx in run_chnl_idxs:
red_chnls += (chnl,)
red_pes_dct[(form, pidx, sidx)] = red_chnls
return red_pes_dct
|
75307b871588b5a9c04a95e29c315c7528fc1259
| 701,080
|
import math
def math_pow(x, y):
    """Implement the SQLite3 math built-in 'pow' via Python.

    :param x: base
    :param y: exponent
    :return: ``x ** y`` as a float, or ``None`` (SQLite NULL) for invalid
        operands, domain errors, or overflow.
    """
    try:
        return math.pow(x, y)
    except (TypeError, ValueError, OverflowError):
        # Narrowed from a bare ``except`` that also swallowed
        # KeyboardInterrupt/SystemExit; the None result is kept explicit.
        return None
|
d8e1ad37b0fc2f0df6f525cae282478417a919b7
| 701,081
|
def get_voted_content_for_user(user):
    """Returns a dict where:
    - The key is the content_type model
    - The values are lists of ids of the different objects voted by the user

    :param user: Django-style user object exposing ``is_anonymous()`` and a
        ``votes`` manager with ``values_list``.
    :return: dict mapping content-type model name -> list of object ids;
        empty dict for anonymous users.
    """
    if user.is_anonymous():
        return {}
    user_votes = {}
    for ct_model, object_id in user.votes.values_list("content_type__model", "object_id"):
        # setdefault avoids shadowing the ``list`` builtin as the original did
        user_votes.setdefault(ct_model, []).append(object_id)
    return user_votes
|
799eff5efd184da0b5121f59025c39d0ddb593a3
| 701,082
|
def odd_occurences_in_array(a):
    """
    Finds the element that occurs an odd number of times in an array.

    XOR of all elements gives the odd-occurring element, since
    ``x ^ x == 0`` and ``x ^ 0 == x``.

    :param a: iterable of integers
    """
    accumulator = 0
    for value in a:
        accumulator ^= value
    return accumulator
|
8085fb8ffa5df9628caa5fef541b5d7b78c372b0
| 701,083
|
def checkExtention(files):
    """
    Check the extension of each file-like object (via its ``name``).

    Returns "Files Verified" when all extensions are accepted, otherwise an
    error message naming the first offending file.
    """
    accepted = ('pdf', 'jpeg', 'png', 'jpg')
    for candidate in files:
        extension = candidate.name.lower().rsplit(".", 1)[-1]
        if extension not in accepted:
            return candidate.name + " is not accepted. We accept only 'pdf', 'jpeg', 'png', 'jpg' file format."
    return "Files Verified"
|
247f43087d9e85d360900c5da2b8bfa1581117ef
| 701,084
|
def sub(x, y):
    """Return the difference ``y - x`` (the first argument is subtracted
    from the second)."""
    difference = y - x
    return difference
|
d07223d6866d27e926952b74d29274f7de1b7a9f
| 701,086
|
import re
def extract_length(value):
    """
    extract length data from a provided value

    Returns a tuple of a detected length value and a length unit. If no unit
    type can be extracted, it will be assumed that the provided value has no
    unit.

    Args:
        value: the value to parse

    Returns:
        the length and unit type (either may be ``None``)
    """
    if not value:
        return None, None
    parsed = re.match(r'^\s*(\d*\.?\d*)\s*(\S*)?\s*$', value)
    if parsed is None:
        return None, None
    amount = parsed.group(1)
    unit = parsed.group(2)
    if not amount:
        # No numeric part at all: report neither amount nor unit.
        return None, None
    return amount, (unit if unit else None)
|
b418b76114fafe24c86c2e0dcfa62bb19b29ff59
| 701,087
|
def intersect(set_a, set_b):
    """Return the elements of ``set_a`` that also appear in ``set_b``,
    preserving the order (and duplicates) of ``set_a``.
    """
    common = []
    for item in set_a:
        if item in set_b:
            common.append(item)
    return common
|
be1bf6cb55d0ab94e68cc05fc6d9fb5d68f5dd4c
| 701,088
|
def tweet_decode(tweet):
    """ Gets data from tweet and returns a simplified data structure
    containing only the ``id``, ``full_text`` and ``created_at`` fields of
    the nested ``tweet['tweet']`` payload.
    """
    inner = tweet['tweet']
    return {
        'id': inner['id'],
        'full_text': inner['full_text'],
        'created_at': inner['created_at'],
    }
|
b7c9667bd3bd755e3e7120f366faf6d19419c316
| 701,089
|
def cxyz_to_xyzc( v ):
    """
    Move the channel axis of a batched torch tensor from position 1 to the
    last position (NCHW -> NHWC, NCDHW -> NDHWC).

    NOTE(review): the original docstring claimed a torch->numpy conversion,
    but the code only permutes tensor axes.

    :param v: torch tensor of shape (N, C, ...) with 2 or 3 spatial dims
    :return: the permuted torch tensor (unchanged for other ranks)
    """
    spatial_dims = len(v.shape) - 2
    if spatial_dims == 2:
        return v.permute(0, 2, 3, 1)
    if spatial_dims == 3:
        return v.permute(0, 2, 3, 4, 1)
    return v
|
65dcaf573bf53f0b78bdea83d0ce809bd335a480
| 701,090
|
def _ros(veos, vpos, eos, pos):
"""
ROG = Rate of Senescing (Days)
"""
return (veos - vpos) / (eos - pos)
|
6a944f9c7719866fcd862f8d4b9f088d277b48aa
| 701,091
|
import os
def RunTransformix(command):
    """Run the transfomix transformation. You must be able to call transformix
    from your command shell to use this. You must also have your transformation
    parameter files set before running (see transformix parameter files).

    Parameters
    ----------
    command: string
        Sent to the system for transformix running (see elastix command line implementation).

    Returns
    -------
    command: string
        Same string as above.
    """
    # Echo the command, then hand it to the shell.
    # NOTE(review): os.system runs through the shell -- command must be trusted.
    print(str(command))
    os.system(command)
    return command
|
01012ed8ff7d8fe807fc6dd6aa78e613868093ac
| 701,092
|
import time
def timer(fcn):
    """Decorator that prints the runtime of ``fcn`` in milliseconds and
    returns its result unchanged.

    :param fcn: the function to time
    :return: the wrapped function
    """
    from functools import wraps

    @wraps(fcn)  # preserve the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        t0 = time.time_ns()
        res = fcn(*args, **kwargs)
        elapsed_ns = time.time_ns() - t0
        # ns -> ms; the original printed stray quote characters around the unit
        print(f"{fcn.__name__}: {round(elapsed_ns * 10**-6, 1)} ms")
        return res
    return wrapper
|
9a76c7222f9f427abdde9194535acdf9880a2807
| 701,093
|
def read_landmarks(f_landmarks):
    """
    Reads file containing landmarks for image list. Specifically,
    <image name> x1 y1 ... xK yK (tab-separated) is the expected format.

    :param f_landmarks: text file with image list and corresponding landmarks
    :return: dict mapping image name -> list of int landmark coordinates.
    """
    landmarks_lut = {}
    with open(f_landmarks) as handle:
        for raw_line in handle:
            fields = raw_line.replace("\n", "").split("\t")
            landmarks_lut[fields[0]] = [int(coord) for coord in fields[1:]]
    return landmarks_lut
|
9c4bf6b405f4fb49ace8badb4b3151cc457bbf84
| 701,094
|
def parse_char(ch):
    """
    Parse a character spec into its integer code.

    Accepts a single character ('A'), a prefixed hex escape ('\\x41' or
    '0x41'), or bare hex digits ('41'). ``None`` passes through as ``None``.

    :raises ValueError: for an empty hex part or more than two hex digits.
    """
    if ch is None:
        return None
    if len(ch) == 1:
        return ord(ch)
    if ch[0:2] in ("0x", "\\x"):
        # Strip the hex prefix before interpreting the digits.
        ch = ch[2:]
    if not ch:
        raise ValueError("Empty char")
    if len(ch) > 2:
        raise ValueError("Char can be only a char letter or hex")
    return int(ch, 16)
|
b3726d00839caf92b64d976ed447f8c1824efb9f
| 701,095
|
import json
def trigger_oauth_expires_test(limit):
    """ Test data for IFTTT trigger bunq_oauth_expires.

    Builds three fixed sample entries and returns the first ``limit`` of
    them as a JSON string under the "data" key.
    """
    samples = [
        ("2018-01-05T11:25:15+00:00", "1", "1515151515"),
        ("2014-10-24T09:03:34+00:00", "2", "1414141414"),
        ("2008-05-30T04:20:12+00:00", "3", "1212121212"),
    ]
    result = [{
        "created_at": stamp,
        "expires_at": stamp,
        "meta": {
            "id": ident,
            "timestamp": ts
        }
    } for stamp, ident, ts in samples]
    return json.dumps({"data": result[:limit]})
|
6f0bf5ed1f2749bf67d9068ac9eb85cc6fd86e17
| 701,096
|
def translate_subset_type(subset_type):
    """Maps subset shortcut to full name.

    Args:
        subset_type (str): Subset shortcut ('max', 'min' or 'random').

    Returns:
        str: Subset full name ('best', 'worst' or 'random').

    Raises:
        ValueError: If ``subset_type`` is not a known shortcut.
    """
    mapping = {'max': 'best', 'random': 'random', 'min': 'worst'}
    if subset_type not in mapping:
        # The original message used a backslash line continuation, which
        # baked the source indentation whitespace into the user-visible text.
        raise ValueError(
            'Undefined model version: "{}". Use "max", "min" or '
            '"random" instead.'.format(subset_type)
        )
    return mapping[subset_type]
|
ff40e230c403818b55897ff96bee131d0c687096
| 701,097
|
import argparse
def arg_setup():
    """Sets up an argument parser and returns the arguments.

    Returns:
        argparse.Namespace: parsed CLI args with ``scenario``, ``model``
        and ``episodes`` attributes.
    """
    # NOTE(review): parses sys.argv, so this should only run as a script
    # entry point.
    parser = argparse.ArgumentParser(
        description="run simulations of agents with wishful thinking")
    # Which scenario to simulate (positional).
    parser.add_argument('scenario',
                        help="""the scenario to use (currently: sequence, fixed,
                        gen_from_fixed, pmexp)""")
    # Which agent model to drive the simulation with (positional).
    parser.add_argument('model',
                        help="""the kind of model to use
                        (currently: old, valopt, const, constvar)""")
    parser.add_argument('episodes', type=int, help="the number of episodes to run")
    return parser.parse_args()
|
4a28bb0fa8925350d928b610b9b6bb62798aaf74
| 701,098
|
def get_instruments(instruments, level):
    """Return the subset of requested THEMIS instruments valid for ``level``.

    ``instruments`` may be a single name, a list/tuple of names, or '*'
    (which selects every instrument valid for the level). Names are
    lower-cased before matching. For 'l2_mag' the request is taken as the
    valid list itself.
    """
    if level == 'l1':
        valid = ['bau', 'eff', 'efp', 'efw', 'esa', 'fbk', 'fff_16',
                 'fff_32', 'fff_64', 'ffp_16', 'ffp_32', 'ffp_64',
                 'ffw_16', 'ffw_32', 'ffw_64', 'fgm', 'fit', 'hsk',
                 'mom', 'scf', 'scm', 'scmode', 'scp', 'scw', 'spin',
                 'sst', 'state', 'trg', 'vaf', 'vap', 'vaw']
    elif level == 'l2_mag':
        valid = instruments
    else:
        valid = ['efi', 'esa', 'fbk', 'fft', 'fgm', 'fit', 'gmom',
                 'mom', 'scm', 'sst']
    if not isinstance(instruments, (list, tuple)):
        instruments = [instruments]
    selected = []
    for name in instruments:
        name = name.lower()
        if name == '*':
            selected = valid
            break
        if name in valid:
            selected.append(name)
    return selected
|
b797856b35f72eb34fa2d23a2e8d5ff7ade77516
| 701,099
|
def trigger_oauth_expires_delete(identity):
    """ Delete a specific trigger identity for trigger bunq_oauth_expires.

    Trigger identities are not stored, so there is nothing to delete and an
    empty response body is returned.
    """
    return ""
|
355084d71efc7789335b7f0ca15e90165a2b5e26
| 701,100
|
def check_multigene(overlaps, min_overlap_bp=0, min_query_overlap=0, min_gene_overlap=.5):
    """
    overlaps is a list of: (gene, overlap_bp, overlap_gene_ratio, overlap_query_ratio)

    Returns "poly-<gene1>-<gene2>-..." when every overlap passes all three
    thresholds, otherwise "novel".
    """
    def passes(entry):
        _, bp, gene_ratio, query_ratio = entry
        return (bp >= min_overlap_bp
                and gene_ratio >= min_gene_overlap
                and query_ratio >= min_query_overlap)

    if all(passes(entry) for entry in overlaps):
        return "poly-" + "-".join(entry[0] for entry in overlaps)
    return "novel"
|
5f7c3ba7231939e2e54ce236a79025952d5e9a84
| 701,101
|
def get_path_prefix(path):
    """
    Return, for each non-empty segment of ``path``, a one-element list
    containing that segment.

    :param path: slash-separated path string
    :return: list of single-element lists, e.g. "/a/b" -> [["a"], ["b"]]
    """
    # The truthiness filter drops the empty strings produced by leading,
    # trailing or doubled slashes. (The original also carried an unused
    # local counter ``j``, removed here.)
    return [[segment] for segment in path.split("/") if segment]
|
728c019d842c3b5bf7c6f6b8cebf13a1ad90bd41
| 701,102
|
import math
def occupancy_to_color(occupancy):
    """
    Map an occupancy in [0, 1] to a grayscale color name "grayNN";
    higher occupancy yields a darker gray.
    """
    level = int(math.floor((1 - occupancy) * 100))
    name = "gray" + str(level).zfill(2)
    # "gray100" is out of range for the two-digit scheme; clamp to gray99.
    return "gray99" if name == "gray100" else name
|
838a6025c473b51dab6c38c28f70ba7bafed3071
| 701,103
|
def reshape(a):
    """Combine the last two dimensions of a numpy array into one."""
    *leading, rows, cols = a.shape
    return a.reshape(tuple(leading) + (rows * cols,))
|
7381657c14b8b9245af2d85c1d1b7a0f5d40fff8
| 701,105
|
import argparse
def parse_arguments():
    """
    Parse the command-line arguments for predicting the class of an image.
    """
    arg_parser = argparse.ArgumentParser(
        description="Predict class for a given input image")
    arg_parser.add_argument('image_url', help="url for a test image")
    return arg_parser.parse_args()
|
d3f970c14474daa659c78542e709feb0f1e64794
| 701,106
|
def count_paths_recursive(graph, node_val, target):
    """Count the paths from ``node_val`` to any node value >= ``target``
    using plain recursion (not a recursive generator)."""
    if node_val >= target:
        return 1
    return sum(
        count_paths_recursive(graph, neighbor, target)
        for neighbor in graph[node_val]
    )
|
be2aba80ed7dc734fb30f3b8b1a675d97e73c860
| 701,107
|
import jinja2
def _render(value, params):
    """Renders text, interpolating params via a Jinja2 template."""
    template = jinja2.Template(value)
    return str(template.render(params))
|
b8d9698dfaf1c2500abfa272c8da98a0cf3dd482
| 701,108
|
import math
def inWhichGrid(coord, grid_info):
    """
    Specify which grid cell a coordinate falls in.

    :param coord: (latitude, longitude)
    :param grid_info: dict with keys 'maxLat', 'minLng', 'gridLat',
        'gridLng' and 'lngGridNum' describing the grid layout
    :return: row, column, grid ID (row-major)
    """
    latitude, longitude = coord
    row_idx = math.floor((grid_info['maxLat'] - latitude) / grid_info['gridLat'])
    col_idx = math.floor((longitude - grid_info['minLng']) / grid_info['gridLng'])
    return row_idx, col_idx, row_idx * grid_info['lngGridNum'] + col_idx
|
260d31b57413902febf503ac3f7a11232940d245
| 701,109
|
def transpose(matrix):
    """Matrix transpose.

    Args:
        matrix: list of lists (rows).

    Returns:
        list: list of lists, the transposed matrix.
    """
    return [list(column) for column in zip(*matrix)]
|
22c45de26bc19ca69ac0d04af8e2e02697290035
| 701,111
|
def new_line(string: str):
    """
    Append a newline character to the end of the string.

    Args:
        string: String to append to.

    Returns: The same string with a trailing newline character.
    """
    return f"{string}\n"
|
f4deeaa94a6980f95a3020fa54570fbe1f0a6e9e
| 701,112
|
import itertools
def decaying(start, decay):
    """Return an iterator of exponentially decaying values.

    The first value is ``start``; each subsequent value is the previous one
    multiplied by ``decay``.

    Examples
    --------
    >>> from climin.schedule import decaying
    >>> s = decaying(10, .9)
    >>> [next(s) for i in range(5)]
    [10.0, 9.0, 8.100000000000001, 7.290000000000001, 6.561]
    """
    return (start * decay ** step for step in itertools.count(0))
|
ee88365b3a8e768952fc66d02047b60e3e1a34c1
| 701,114
|
import inspect
def _is_bound_method(the_function: object) -> bool:
"""
Returns True if fn is a bound method, regardless of whether
fn was implemented in Python or in C.
"""
if inspect.ismethod(the_function):
return True
if inspect.isbuiltin(the_function):
self = getattr(the_function, "__self__", None)
return not (inspect.ismodule(self) or (self is None))
return False
|
a0d3121c6c4e1c26b2000af73e25a3d772ef2ad3
| 701,115
|
def macro_calc(item):
    """
    Calculate PPV_Macro and TPR_Macro.

    :param item: PPV or TPR
    :type item: dict
    :return: the mean of the dict's values as float, or the string "None"
        on any failure (e.g. empty dict or non-numeric values)
    """
    try:
        values = item.values()
        return sum(values) / len(values)
    except Exception:
        return "None"
|
ec74fe80a4f52d7676beeca1f9abd701043c77af
| 701,116
|
def height_implied_by_aspect_ratio(W, X, Y):
    """
    Utility function for calculating the height (in pixels) implied by a
    width, x-range, and y-range, so that the data aspect ratio is kept.

    Parameters
    ----------
    W: int
        width in pixels
    X: tuple(xmin, xmax)
        x-range in data units
    Y: tuple(ymin, ymax)
        y-range in data units

    Returns
    -------
    H: int
        height in pixels

    Example
    -------
    plot_width = 1000
    x_range = (0, 35)
    y_range = (0, 70)
    plot_height = height_implied_by_aspect_ratio(plot_width, x_range, y_range)
    """
    x_span = X[1] - X[0]
    y_span = Y[1] - Y[0]
    return int((W * y_span) / x_span)
|
8c3225a27f0284acb708434238d4861bb49b0899
| 701,118
|
import re
def get_namespace(element):
    """Extract the leading '{uri}' namespace from an element's tag via a
    regular expression; return '' when the tag carries no namespace."""
    matched = re.match(r"\{.*\}", element.tag)
    if matched is None:
        return ""
    return matched.group(0)
|
9380e104d05ae6b56463b96bf87d9c0fcd8202cf
| 701,119
|
import os
def get_model_name(filename):
    """
    Return the file's base name without its extension.

    >>> get_model_name("logs/0613/0613-q1-0000.train")
    '0613-q1-0000'
    """
    base = os.path.basename(filename)
    stem, _ = os.path.splitext(base)
    return stem
|
a050918e827b6cbce0ecb19d002cc30619f34c6e
| 701,120
|
import json
import hashlib
def hash_config(configuration: dict) -> str:
    """Computes a SHA-256 hash of a dictionary.

    The dict is serialized to JSON with sorted keys first, so dicts with
    equal contents hash identically regardless of insertion order.

    Args:
        configuration (dict): The configuration to hash.

    Returns:
        str: The hex digest of the canonical JSON form.
    """
    canonical = json.dumps(configuration, sort_keys=True).encode("utf-8")
    return hashlib.sha256(canonical).hexdigest()
|
ccf30915fc736ce4724eb0ceceab59220e94efd0
| 701,121
|
def get_bag_of_communities(network, partition):
    """
    :param network: dictionary containing for each key (each node/page) a
        dictionary holding the page categories under 'categories'.
    :param partition: community assignment, either a list aligned with the
        iteration order of ``network`` or a dict keyed by node.
    :return: list of dictionaries, one per community, mapping each category
        to the number of pages in that community carrying it.
    """
    community_count = len(set(partition))  # number of communities
    bags = [{} for _ in range(community_count)]
    partition_is_list = type(partition) == list
    for position, page in enumerate(network.keys()):
        label = partition[position] if partition_is_list else partition[page]
        for category in network[page]['categories']:
            bag = bags[label]
            bag[category] = bag.get(category, 0) + 1
    return bags
|
615e88393b30b1989d98eeea1ac7123588a51ea9
| 701,122
|
def _to_pascal_case(value: str) -> str:
"""Converts a snake_case string into a PascalCase one."""
value = value.replace("-", "_")
if value.find("_") == -1:
return value[0].upper() + value[1:]
return "".join([v.capitalize() for v in value.split("_")])
|
d41985f1d182b723c9861473e4f9eca8c7a7977e
| 701,123
|
def perma(mat,m):
    """
    Permanent of the matrix.

    :param mat: project-specific matrix object exposing ``isSquare``,
        ``isIdentity``, ``dim``, ``copy``, ``remove`` and
        ``matrix``/``_matrix`` -- TODO confirm exact semantics of that type.
    :param m: the raw nested-list entries of ``mat`` (i.e. ``mat.matrix``).
    :return: the permanent, ``1`` for the identity, or ``None`` for
        non-square input.
    """
    # Non-square matrices have no permanent.
    if not mat.isSquare:
        return None
    # Identity shortcut: its permanent is 1.
    if mat.isIdentity:
        return 1
    # Hard-coded 2x2 expansion (like the determinant, but all signs "+").
    if mat.dim[0]==2:
        return m[0][0]*m[1][1] + m[1][0]*m[0][1]
    # Hard-coded 3x3 expansion, again with every term added.
    if mat.dim[0]==3:
        return (m[0][0]*m[1][1]*m[2][2] +
                m[0][1]*m[1][2]*m[2][0] +
                m[0][2]*m[1][0]*m[2][1] +
                m[0][2]*m[1][1]*m[2][0] +
                m[0][1]*m[1][0]*m[2][2] +
                m[0][0]*m[1][2]*m[2][1]
                )
    # General case: Laplace-style expansion along the first column with all
    # cofactor signs positive.
    total=0
    for i in range(mat.dim[0]):
        temp = mat.copy
        # remove(i+1, 1) looks 1-based (drop row i+1, column 1) -- TODO confirm
        temp.remove(i+1,1)
        co = mat.matrix[i][0]
        total += co*perma(temp,temp._matrix)
    return total
|
71e2408c79e09b4fa5eb328130d2b5b7e7efdf41
| 701,124
|
def remove_list_duplicates(my_list: list) -> list:
    """ Removes duplicated values from a list, keeping first-seen order. """
    # dict preserves insertion order, so the keys are the unique elements.
    seen = dict.fromkeys(my_list)
    return [*seen]
|
e3dd86733117fce7bad2860d860b98b349efcfb5
| 701,125
|
from typing import Callable
import click
def target_id_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for choosing a target ID.
    """
    decorator: Callable[
        [Callable[..., None]],
        Callable[..., None],
    ] = click.option(
        '--target-id',
        type=str,
        help='The ID of a target in the Vuforia database.',
        required=True,
    )
    return decorator(command)
|
1fe67f22e00b4d4930af347c482d28e30032508d
| 701,126
|
def _get_keys(response):
"""
return lists of strings that are the keys from a client.list_objects() response
"""
keys = []
if 'Contents' in response:
objects_list = response['Contents']
keys = [obj['Key'] for obj in objects_list]
return keys
|
dfa1f066ac19138920509935b1dbbdde52591f18
| 701,127
|
def round_updown_to_x(num_in, x, direction="up"):
    """
    Round ``num_in`` to the nearest multiple of ``x`` in a given direction.

    :param num_in: Input value
    :param x: Value to round to a multiple of
    :param direction: Round 'up' (default) or 'down'
    :return: Rounded number
    """
    step = int(x)
    if direction == "down":
        truncated = int(num_in)
        return truncated - truncated % step
    # Note: the offset keeps the original, possibly-float ``x`` here.
    return num_in + (x - num_in) % step
|
206b582843eea234b5b655dddd54eea6e32eec42
| 701,128
|
def img2port(img_url):
    """
    mimvp.com displays the port number as an image; map the image URL to a
    port. A temporary heuristic that is known to be inaccurate.
    """
    code = img_url.split("=")[-1]
    # NOTE(review): ``> 0`` rejects codes that *start* with the marker;
    # preserved as-is since the heuristic is declared temporary/inaccurate.
    if code.find("AO0OO0O") > 0:
        return 80
    return None
|
60768ee0723808ef7d1f3e74287b7a6e576f94c5
| 701,129
|
def coverage(tiles):
    """Sum of the lengths (third element) of all tiles."""
    return sum(tile[2] for tile in tiles)
|
b2cbb6b926e070d9fc457523bb60169ef9b70821
| 701,130
|
def get_int() -> int:
    """
    Read a lone integer from its own line on the console.

    :return: The int value that was read in from the console.
    """
    return int(input().strip())
|
1bffcb1c5c1e7910a11358d4616a747b38ba3d73
| 701,131
|
def regex_search_matches_output(get_output, search):
    """
    Apply a regex search callable to the current output.

    :param get_output: zero-argument callable returning the output text
    :param search: callable (e.g. a compiled pattern's ``search``) applied
        to that text
    :return: whatever ``search`` returns
    """
    current_output = get_output()
    return search(current_output)
|
6f054990d3454a447656567b3ceb12a38c2809a5
| 701,132
|
def crop(img, start_y, start_x, h, w):
    """
    Crop an image given its top-left corner.

    :param img: The image, indexed as (y, x, channels)
    :param start_y: The top left corner y coord
    :param start_x: The top left corner x coord
    :param h: The result height
    :param w: The result width
    :return: A copy of the cropped region.
    """
    region = img[start_y:start_y + h, start_x:start_x + w, :]
    return region.copy()
|
cb3a05989f1538bcec34102c33291d500a21c59d
| 701,133
|
import sys
def normalize_path(path):
    """ Normalizes the path separator to the Unix standard ('/') on
    Windows; other platforms return the path unchanged. """
    if sys.platform != 'win32':
        return path
    return path.replace('\\', '/')
|
b565b2eaefa21708005ecab3538cc34a410b8318
| 701,134
|
def separate_by_class(train):
    """ Map each distinct value of the dependent variable (last column of
    the array) to the rows of ``train`` carrying that value.
    """
    labels = set(train[:, -1])
    return {label: train[train[:, -1] == label] for label in labels}
|
1ed0fd5c077a0d7391955687ec190ded873a9bee
| 701,135
|
import xml.etree.ElementTree as ET
def getDataSetUuid(xmlfile):
    """
    Quickly retrieve the uuid from the root element of a dataset XML file,
    using a streaming parser to avoid loading the entire dataset into memory.
    Returns None if the parsing fails.
    """
    # NOTE: switched from xml.etree.cElementTree, which was removed in
    # Python 3.9; ElementTree has used the C accelerator automatically
    # since 3.3.
    try:
        # The first 'start' event is the root element; stop there.
        for event, element in ET.iterparse(xmlfile, events=("start",)):
            return element.get("UniqueId")
    except Exception:
        return None
|
7386efea461056f8f4b472923b8f4a30d6d80d3b
| 701,136
|
from typing import Union
def injection_volume(val: Union[float, str]) -> Union[int, str]:
    """
    Convert an injection volume to an int, handling the special sentinel
    ``-1`` (or ``"-1"``) which means "As Method".

    :param val: the raw injection volume
    :return: ``"As Method"`` for the sentinel, otherwise ``int(val)``
    """
    if val in {-1, "-1"}:
        return "As Method"
    return int(val)
|
b09240b9c8bd0eebfb7880494f75b105e313b228
| 701,137
|
from typing import Optional
def validate_charge_efficiency(charge_efficiency: Optional[float]) -> Optional[float]:
    """
    Validates the charge efficiency of an object.

    Charge efficiency is always optional.

    :param charge_efficiency: The charge efficiency of the object.
    :return: The validated charge efficiency (or None when absent).
    :raises ValueError: If the value lies outside [0, 1].
    """
    if charge_efficiency is None:
        return None
    if not 0 <= charge_efficiency <= 1:
        raise ValueError("Charge efficiency must be between 0 and 1.")
    return charge_efficiency
|
46e40fcdc65ffec453f1bac9bd03f6a9297647fd
| 701,138
|
def stream_name_to_dict(stream_name, separator='-'):
    """Transform a stream name string into its catalog/schema/table parts.

    Two parts are read as <schema>-<table>; three or more parts as
    <catalog>-<schema>-<table...>, joining the remaining table parts
    with '_'. A single part is the table name alone.
    """
    parts = stream_name.split(separator)
    catalog_name = None
    schema_name = None
    table_name = stream_name
    if len(parts) == 2:
        schema_name, table_name = parts
    elif len(parts) > 2:
        catalog_name = parts[0]
        schema_name = parts[1]
        table_name = '_'.join(parts[2:])
    return {
        'catalog_name': catalog_name,
        'schema_name': schema_name,
        'table_name': table_name,
    }
|
389826fe03f1f5e1c704e2ce54f90da443e1d23c
| 701,140
|
def snake_split(s):
    """
    Split a string into a word list using snake_case rules.

    Spaces are removed entirely before splitting on underscores.
    """
    cleaned = s.strip().replace(" ", "")
    return cleaned.split("_")
|
af70dfa1190ec783b8431630be2ae92434af146a
| 701,142
|
import decimal
from typing import Iterable
def try_parse(text, valid_types=None):
    """Try to parse a text string as a number.

    Returns (valid, value) where ``valid`` reports whether the conversion
    succeeded and ``value`` is the converted result, or None on failure.

    :param text: the string to convert
    :param valid_types: optional iterable of number types to try in order;
        defaults to [decimal.Decimal]
    :raises RuntimeError: when ``valid_types`` is not iterable, or contains
        a non-type entry
    """
    if valid_types is None:
        valid_types = [decimal.Decimal]
    if not isinstance(valid_types, Iterable):
        raise RuntimeError("Invalid type list provided")
    for candidate in valid_types:
        if not isinstance(candidate, type):
            raise RuntimeError("Non-type given in type list")
        try:
            return True, candidate(text)
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed; try the next type.
            pass
    return False, None
|
c210a88c86b55404cf620f504da7e229a033eeed
| 701,143
|
def find_from(board, word, y, x, seen):
    """Can we find a word on board, starting at x, y?

    :param board: grid of letters indexed board[y][x]; the bounds checks
        below assume a 5x5 board (valid indices 0..4) -- TODO confirm
    :param word: the word (or remaining suffix of it) to search for
    :param y: current row on the board
    :param x: current column on the board
    :param seen: set of (y, x) positions already used on this path
    :return: True if the word can be traced from here, else False

    Prints one trace line per step (status, position, letter, remaining
    word, seen set).
    """
    # This is called recursively to find smaller and smaller words
    # until all tries are exhausted or until success.
    # Base case: this isn't the letter we're looking for.
    if board[y][x] != word[0]:
        print("%-6s%d,%d %-3s%-8s%-30s" % ("NO", y, x, board[y][x], word,
                                           seen))
        return False
    # Base case: we've used this letter before in this current path
    if (y, x) in seen:
        print("%-6s%d,%d %-3s%-8s%-30s" % ("SEEN", y, x, board[y][x], word,
                                           seen))
        return False
    # Base case: we are down to the last letter --- so we win!
    if len(word) == 1:
        print("%-6s%d,%d %-3s%-8s%-30s" % ("WIN", y, x, board[y][x], word,
                                           seen))
        return True
    # Otherwise, this letter is good, so note that we've seen it,
    # and try of all of its neighbors for the first letter of the
    # rest of the word
    print("%-6s%d,%d %-3s%-8s%-30s" % ("OK", y, x, board[y][x], word, seen))
    # This next line is a bit tricky: we want to note that we've seen the
    # letter at this location. However, we only want the child calls of this
    # to get that, and if we used `seen.add(...)` to add it to our set,
    # *all* calls would get that, since the set is passed around. That would
    # mean that once we try a letter in one call, it could never be tried again,
    # even in a totally different path. Therefore, we want to create a *new*
    # seen set that is equal to this set plus the new letter. Being a new
    # object, rather than a mutated shared object, calls that don't descend
    # from us won't have this `y,x` point in their seen.
    #
    # To do this, we use the | (set-union) operator, read this line as
    # "rebind seen to the union of the current seen and the set of point(y,x))."
    #
    # (this could be written with an augmented operator as "seen |= {(y, x)}",
    # in the same way "x = x + 2" can be written as "x += 2", but that would seem
    # harder to understand).
    seen = seen | {(y, x)}
    # Try each in-bounds neighbor (up, down, left, right) with the rest of
    # the word.
    if y > 0:
        if find_from(board, word[1:], y - 1, x, seen):
            return True
    if y < 4:
        if find_from(board, word[1:], y + 1, x, seen):
            return True
    if x > 0:
        if find_from(board, word[1:], y, x - 1, seen):
            return True
    if x < 4:
        if find_from(board, word[1:], y, x + 1, seen):
            return True
    # Couldn't find the next letter, so this path is dead
    return False
|
0e1911f90dd4eb4729d7e896e62bdc2b70ff8ee9
| 701,144
|
def hello():
    """
    Basic hello-world route used to check that the server is running.
    """
    return "Welcome to Codenames."
|
0b12cc94bc266485199c9737d2cc016c8b4da8fd
| 701,145
|
from typing import List
def partition_labels(S: str) -> List[int]:
    """
    Partition S into as many parts as possible so that each letter appears
    in at most one part; return the part sizes.

    >>> partition_labels('ababcbacadefegdehijhklij')
    [9, 7, 8]
    >>> partition_labels('eccbbbbdec')
    [10]
    """
    sizes = []
    start = 0
    total = len(S)
    while start < total:
        # Grow the partition end until it covers the last occurrence of
        # every letter inside it.
        end = S.rfind(S[start])
        scan = start + 1
        while scan <= end:
            end = max(end, S.rfind(S[scan]))
            scan += 1
        sizes.append(end - start + 1)
        start = end + 1
    return sizes
|
cdc1154ce54116edcc7df52ee97727964289b891
| 701,146
|
from typing import List
def orderings2() -> List[List[int]]:
    """Enumerates the storage orderings for an input of rank 2."""
    identity = [0, 1]
    return [identity, identity[::-1]]
|
f85f26b79aa25d980877e57cbcdbdb9f7295ac7d
| 701,147
|
def read_file(filepath):
    """Reads a text file and returns each stripped line as a list element.

    Parameters:
        filepath (str): path to file

    Returns
        list: list of strings
    """
    with open(filepath, 'r', encoding='utf-8') as file_obj:
        return [line.strip() for line in file_obj]
|
da1db5ffb99b746271b0fd0f9779da4e6f520246
| 701,148
|
def clear(num: int, start: int, end: int) -> int:
    """ Zero the bits of ``num`` from ``start`` up to (not including)
    ``end``.

    :raises ValueError: when ``start`` equals ``end``.
    """
    if start == end:
        raise ValueError('Start value same as end')
    # A run of (end - start) one-bits, shifted into position, then inverted.
    ones_run = (1 << (end - start)) - 1
    return num & ~(ones_run << start)
|
68dd669bd9df5ce260daf09375c6ed872580e475
| 701,149
|
def delete_coa():
    """
    Delete a certificate of analysis from storage.

    Not yet implemented.

    :raises NotImplementedError: always, until certificate removal is
        implemented.
    """
    # TODO: Remove certificate from storage.
    # TODO: Remove the certificate data.
    # The original ``return NotImplementedError`` handed the exception
    # *class* back to the caller; raising it signals the stub properly.
    raise NotImplementedError
|
3e8248b6cf87521aae31b4b4eabb601411f38d54
| 701,150
|
import torch
def f(x):
    """A transformation with a fully-predictable part (first 16 features
    shifted by +1) and an inherently-random part (remaining features shifted
    by -1 plus Gaussian noise scaled by 1/3)."""
    head, tail = torch.split(x, 16, -1)
    noisy_tail = tail - 1 + torch.randn_like(tail) / 3
    return torch.cat((head + 1, noisy_tail), -1)
|
0da307982a0af0823a1928da097235bf3214b559
| 701,151
|
import re
def align_shiplabels(text):
    """Return the announcement with the ship labels aligned.

    Wraps any line-leading two-digit label "NN:" (not followed by "MM "
    as in a time) in backticks so it renders monospaced.
    """
    # See comment in is_unscheduled() for a description of the regex,
    wrap = '`{}`'.format
    return re.sub('^\d\d:(?!\d\d\s)',
                  lambda matched: wrap(matched.group(0)),
                  text, flags=re.MULTILINE)
|
c266846f67d212f8cc996f65b46cccc3cb51ff31
| 701,152
|
def imag(x):
    """Returns the imaginary part of a complex tensor.

    :param x: complex tensor stored as ``[real, imag]`` along dimension 0
    :type x: torch.Tensor
    :returns: The imaginary part of `x`; has one less dimension than `x`.
    :rtype: torch.Tensor
    """
    imaginary_part = x[1, ...]
    return imaginary_part
|
e2a9a3ba22a4ec896a60b3991618f32014d088fd
| 701,153
|
import torch
def unflatten_array(X, N, param_shapes):
    """Reshape a flattened parameter vector back into per-layer tensors.

    :param X: flat 1-D tensor of all network parameters
    :param N: offsets such that layer ``i`` occupies ``X[N[i]:N[i+1]]``
    :param param_shapes: target shape for each layer
    :return: list of reshaped tensors
    """
    chunks = []
    for idx, shape in enumerate(param_shapes):
        chunks.append(torch.reshape(X[N[idx]:N[idx + 1]], shape))
    return chunks
|
1b127de0d8389d2b44d4f50c3cb85429d2629107
| 701,154
|
import math
def roundoff(a, digit=2):
    """
    Round the number to the given number of significant digits.

    :param a: float (must be positive, as log10 is taken)
    :param digit: significant digits to keep

    :Examples:
    >>> roundoff(3.44e10, digit=2)
    3.4e10
    >>> roundoff(3.49e-10, digit=2)
    3.5e-10
    """
    magnitude = int(math.log10(a))
    if a > 1:
        return round(a, -magnitude + digit - 1)
    return round(a, -magnitude + digit)
|
c2bded960f9fa6431a03f6562b49f73477379a32
| 701,155
|
def Hx(sk, messages):
    """A helper function Hx: computes sk[0] + sum(sk[i+1] * messages[i]).

    Requires ``len(messages) == len(sk) - 1``.
    """
    assert len(messages) == len(sk) - 1
    total = sk[0]
    for coeff, message in zip(sk[1:], messages):
        total = total + coeff * message
    return total
|
609794cc6bcf7b6321192d23a0d4903ffce05b91
| 701,158
|
def _unsur(s: str) -> str:
"""Merge surrogates."""
return s.encode("utf-16", "surrogatepass").decode("utf-16", "surrogatepass")
|
d6bc230a77c735c9922ec66aa6a3537beea5b42f
| 701,159
|
import urllib3
from bs4 import BeautifulSoup
def get_html_page(page_url):
    """ Fetch a page and return its source parsed as a BeautifulSoup tree."""
    pool = urllib3.PoolManager()
    response = pool.request('GET', page_url)
    parsed = BeautifulSoup(response.data, 'html.parser')
    response.release_conn()
    return parsed
|
5c535322f713d94c541d423e79e780016977d4e6
| 701,160
|
def _split_comma_separated(string):
"""Return a set of strings."""
return set(filter(None, string.split(',')))
|
855c23c7a6602c9306b73016cc66573773d1d502
| 701,161
|
def interpolate(xs: list, ys: list, zs: list, ratio: float) -> list:
    """ Linearly interpolate between two 3-D points, given per-axis
    [start, end] pairs and a ratio in [0, 1]. """
    return [axis[0] + (axis[1] - axis[0]) * ratio for axis in (xs, ys, zs)]
|
63816ec83adbabef21ea92ee36a92884417755cc
| 701,162
|
import hashlib
def str_to_sha256(s: str):
    """Generate a 256 bit integer hash of an arbitrary string."""
    digest = hashlib.sha256(s.encode("utf-8")).digest()
    return int.from_bytes(digest, byteorder="big")
|
90fcae50485e1469cdb0f363e4110011d5e6b642
| 701,163
|
def cityscapes_palette(num_cls=None):
    """
    Generates the Cityscapes data-set color palette.
    Data-Set URL:
        https://www.cityscapes-dataset.com/
    Color palette definition:
        https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
    Original source taken from:
        https://gluon-cv.mxnet.io/_modules/gluoncv/utils/viz/segmentation.html .
    `num_cls`: the number of classes (colors) to keep; None keeps all 19
    return: the generated flat RGB color palette (3 ints per class)
    raises: Exception if `num_cls` exceeds the 19 defined classes
    """
    # Flat [R, G, B, R, G, B, ...] list — 19 classes, 57 ints total.
    palette = [
        128, 64, 128,
        244, 35, 232,
        70, 70, 70,
        102, 102, 156,
        190, 153, 153,
        153, 153, 153,
        250, 170, 30,
        220, 220, 0,
        107, 142, 35,
        152, 251, 152,
        0, 130, 180,
        220, 20, 60,
        255, 0, 0,
        0, 0, 142,
        0, 0, 70,
        0, 60, 100,
        0, 80, 100,
        0, 0, 230,
        119, 11, 32,
    ]
    if num_cls is not None:
        # Compare the requested RGB-triplet count against the flat list
        # length.  (The old check compared `num_cls` itself against the
        # 57-int list, so e.g. 20..56 classes silently returned a palette
        # shorter than requested instead of raising.)
        if num_cls * 3 > len(palette):
            raise Exception("Palette Color Definition exceeded.")
        palette = palette[:num_cls*3]
    return palette
|
b1a69a4e511e541f13aeb584003b7f4ec0dde956
| 701,164
|
def try_with_lazy_context(error_context, f, *args, **kwargs):
    """
    Call an arbitrary function with arbitrary args / kwargs, wrapping
    in an exception handler that attaches a prefix to the exception
    message and then raises (the original stack trace is preserved).
    The `error_context` argument should be a lambda taking no
    arguments and returning a message which gets prepended to
    any errors.

    :param error_context: zero-arg callable producing the prefix message;
        only invoked if `f` raises (hence "lazy").
    :param f: the function to call.
    :return: whatever `f` returns on success.
    :raises: re-raises whatever `f` raised, with its first arg prefixed.
    """
    try:
        return f(*args, **kwargs)
    except Exception as e:
        msg = error_context()
        if e.args:
            e.args = ("%s:\n%s" % (msg, e.args[0]),) + tuple(e.args[1:])
        else:
            # Some exceptions are raised with no args at all; the old code
            # crashed with IndexError here and masked the original error.
            e.args = (msg,)
        raise
|
7e2a4cfee7b4acf5a449b4b07f8c56baca5a63d3
| 701,165
|
def make_tweet_fn(text, time, lat, lon):
    """An alternate implementation of make_tweet: a tweet is a function.

    The returned closure maps the keys 'text', 'time', 'lat' and 'lon'
    to the corresponding values; any other key yields None.

    >>> t = make_tweet_fn("just ate lunch", datetime(2012, 9, 24, 13), 38, 74)
    >>> tweet_text_fn(t)
    'just ate lunch'
    >>> tweet_time_fn(t)
    datetime.datetime(2012, 9, 24, 13, 0)
    >>> latitude(tweet_location_fn(t))
    38
    """
    fields = {'text': text, 'time': time, 'lat': lat, 'lon': lon}

    def tweet(string):
        return fields.get(string)

    return tweet
# Please don't call make_tweet in your solution
|
3f465f91dce37641e1ce391f6025c742d9e6ef36
| 701,166
|
import pandas as pd
def noaaDateConv(dataframe: pd.DataFrame) -> pd.DataFrame:
    """
    This function takes a dataframe with datetime values and converts it into a format that the NOAA ccg tool can
    easily read
    :param dataframe: A dataframe that has to have a column labeled 'datetime' which contains dt.datetime formatted
    items
    :return: the same dataframe with the datetime column replaced by year, month, day, hour, and minute

    NOTE(review): the input dataframe is mutated in place (its 'datetime'
    and value columns are dropped via ``inplace=True``) even though a new
    dataframe is also returned — callers should not rely on the original
    object keeping its columns.
    NOTE(review): the measured-value column is assumed to sit at positional
    index 1, i.e. a ['datetime', <value>] column order — TODO confirm
    against callers.
    """
    year, month, day, hour, minute, cpd = [], [], [], [], [], [] # preallocate lists
    cpd_name = dataframe.columns[1] # get the cpd name
    # iterate through rows and append lists, separating components of the datetime
    for index, value in dataframe.iterrows():
        year.append(value.datetime.year)
        month.append(value.datetime.month)
        day.append(value.datetime.day)
        hour.append(value.datetime.hour)
        minute.append(value.datetime.minute)
        cpd.append(value[cpd_name])
    # drop previous columns (this mutates the caller's dataframe)
    dataframe.drop(['datetime', cpd_name], axis=1, inplace=True)
    dataframe = dataframe.reset_index()
    dataframe.drop('index', axis=1, inplace=True)
    # append each list to the new dataframe in appropriate order
    for item in [year, month, day, hour, minute, cpd]:
        item = pd.Series(item)
        dataframe = dataframe.merge(item.to_frame(), left_index=True, right_index=True, how='inner')
    # rename columns
    dataframe.columns = ['year', 'month', 'day', 'hour', 'minute', cpd_name]
    return dataframe
|
2556009e803ce1406ed5845636ff7cad4d5c1d21
| 701,167
|
def locale_factory(factory):
    """
    Decorator that registers *factory* as the module-level locale provider
    by storing it in the module-global ``_get_locale``.

    The factory is presumably a zero-argument callable returning a locale
    string (e.g. ``'en_US'``) — TODO confirm against ``_get_locale`` usage,
    which is defined elsewhere in this module.

    :param factory: callable to register as the locale provider
    :return: the *factory* callable itself, unchanged (decorator protocol)
    """
    global _get_locale
    _get_locale = factory
    return factory
|
10c306a54ddfd7215c3fa12b0ceacb4e8f177e0e
| 701,168
|
def speedup(i):
    """
    Compare two sample sets of empirical results and compute naive speedups.

    Input: {
              samples1 - list of original empirical results
              samples2 - list of new empirical results (lower than original is better)
              (key1)   - prefix for min/max/mean in return dict (default 's1')
              (key2)   - prefix for min/max/mean in return dict (default 's2')
            }
    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0
              {key1}_min / {key2}_min       - smallest sample
              {key1}_max / {key2}_max       - largest sample
              {key1}_mean / {key2}_mean     - arithmetic mean
              {key1}_var / {key2}_var       - (max-min)/mean, or None if mean == 0
              {key1}_delta / {key2}_delta   - max - min
              {key1}_center / {key2}_center - min + delta/2
              naive_speedup                 - mean(samples1) / mean(samples2)
              naive_speedup_min             - min(samples1) / min(samples2)
              naive_speedup_var             - ratio between the two speedups,
                                              always >= 1.0
            }
    """
    s1 = i['samples1']
    s2 = i['samples2']

    s1min = min(s1)
    s1max = max(s1)
    s2min = min(s2)
    s2max = max(s2)

    s1mean = float(sum(s1)) / len(s1)
    s2mean = float(sum(s2)) / len(s2)

    s1delta = s1max - s1min
    s2delta = s2max - s2min

    # Relative spread (delta/mean); left as None when the mean is zero.
    s1var = None
    if s1mean != 0:
        s1var = s1delta / s1mean
    s2var = None
    if s2mean != 0:
        s2var = s2delta / s2mean

    # Midpoint of the observed range.
    s1center = s1min + float(s1delta / 2)
    s2center = s2min + float(s2delta / 2)

    # Naive speedups (no statistical significance attached).
    ns_mean = s1mean / s2mean
    ns_min = s1min / s2min

    # Check keys.
    k1 = i.get('key1', '')
    if k1 == '':
        k1 = 's1'
    k2 = i.get('key2', '')
    if k2 == '':
        k2 = 's2'

    # Normally we should simply not use speedup if variation of both
    # variables is too high and there is an overlap ...
    # We should also perform stat analysis, but as some metric
    # we temporally calculate best and worst possible speedup
    # (again it should be used more as hints)
    if ns_mean > ns_min:
        nsv = ns_mean / ns_min
    else:
        nsv = ns_min / ns_mean

    # Perform statistical analysis, if available,
    # and detect expected value(s) and confidence interval.
    rr = {'return': 0,
          k1 + '_min': s1min, k1 + '_max': s1max,
          k2 + '_min': s2min, k2 + '_max': s2max,
          k1 + '_mean': s1mean, k2 + '_mean': s2mean,
          k1 + '_var': s1var, k2 + '_var': s2var,
          k1 + '_delta': s1delta, k2 + '_delta': s2delta,
          # Bug fix: the second center was previously assigned s2delta.
          k1 + '_center': s1center, k2 + '_center': s2center,
          'naive_speedup': ns_mean,
          'naive_speedup_min': ns_min,
          'naive_speedup_var': nsv}

    return rr
|
053a6d0831f8d11f34fa29e5117ea7c66e5f4bad
| 701,169
|
def calc_d(D_o, t, inner=False):
    """Return the outer or inner diameter [m] of a layered pipe section.

    :param float D_o: Outer diameter [m]
    :param float t: Layer thickness [m]
    :param boolean inner: True -> inner diameter (D_o - 2t),
        False -> outer diameter (D_o + 2t)
    """
    offset = 2 * t
    return D_o - offset if inner else D_o + offset
|
eaadfed210c0b06b7fd0bbc500fe093461d9766c
| 701,170
|
def rjd_to_jdn(rjd: int) -> int:
    """Convert a Reduced Julian Day (RJD) number to a Julian day number (JDN).

    :param rjd: Reduced Julian Day (RJD) number.
    :type rjd: int
    """
    # RJD is defined as JDN - 2400000, so the conversion is a fixed offset.
    RJD_OFFSET = 2400000
    return rjd + RJD_OFFSET
|
cfb26706b16f6421449353c97960e417f77a4647
| 701,171
|
def ask_for_path():
    """Prompt the user for the path to the GPS device.

    Returns the entered path, or the sentinel string "default" when the
    user enters nothing (meaning: use the standard path).
    """
    print("\nGib den Pfad zum GPS-Geraet ein (NICHT zum Unterordner 'GPX').")
    print("Falls Standardpfad uebernommen werden soll: keine Eingabe")
    answer = input(">> ")
    return "default" if answer == "" else answer
|
1bb66e2faf21993f36a44a7a8caa158cd6514745
| 701,173
|
def is_neq_prefix(text_1, text_2):
    """Return True if text_1 is a proper (non-equal) prefix of text_2."""
    return text_2.startswith(text_1) and text_1 != text_2
|
f9e7f835ec577dc539cd586da5e6f9e2e0903a74
| 701,174
|
def find_common_cond_attrs(sql_obj1, sql_obj2):
    """
    Find projection attributes shared (by alias) between two SQL objects,
    for use in a join condition.
    :param sql_obj1: object exposing ``pr_list`` of projections with ``alias``
    :param sql_obj2: object exposing ``pr_list`` of projections with ``alias``
    :return: list of sql_obj1 projections whose alias also occurs in sql_obj2
    """
    return [attr1
            for attr1 in sql_obj1.pr_list
            for attr2 in sql_obj2.pr_list
            if attr1.alias == attr2.alias]
|
20db9c2f7d8ad1479e94411f7384fcc52a6b90f4
| 701,175
|
import json
def read_config(fn, perm="r"):
    """
    Load program parameters from a JSON config file.
    :param str fn: Name of config file (config.json)
    :param str perm: Mode in which the file is opened
    :return: Parsed config parameters
    """
    with open(fn, perm) as cfg_file:
        return json.load(cfg_file)
|
f77a1dbb9b1e0f9f43dbef745f550485f1003cf7
| 701,176
|
import optparse
def getParse():
    """Desc: build and return the command-line option parser."""
    usage='''\
Desc:
    This is dataprocessing Tool FOR Beverly.
    # python datapro.py --dir ./dir/data'''
    opt_parser = optparse.OptionParser(usage=usage)
    # (short flag, long flag, destination attribute, help text)
    for short_flag, long_flag, dest, help_text in (
            ("-d", "--dir", "dataDir", "This is the data dir path"),
            ("-f", "--file", "retFile", "This is the result file"),
            ("-t", "--type", "type", "you can choice csv|excel|html|json"),
    ):
        opt_parser.add_option(short_flag, long_flag, dest=dest, help=help_text)
    return(opt_parser)
|
0f4c085c857c238f03505abaead76b0c21268fda
| 701,177
|
def get_label_base(label_path):
    """Return the last two path components, i.e. a directory-independent label path."""
    segments = label_path.split('/')
    return '/'.join(segments[-2:])
|
16748bdb197249f157a288e2c664374ad432e6c7
| 701,178
|
def properties_var():
    """Return the stream properties as a tuple: (n_chunks, chunk_size)."""
    n_chunks = 200
    chunk_size = 250
    return (n_chunks, chunk_size)
|
b5f5619ed1f44fe3c18e865f1374eed26042643d
| 701,179
|
import random
def mutate(file, factor):
    """Randomly overwrite roughly ``factor`` of the bytes in ``file``.

    :param file: bytes-like input; copied into a fresh bytearray.
    :param factor: fraction of bytes to mutate; any non-zero factor
        performs at least one mutation.
    :return: the mutated bytearray (the input object is not modified).
    """
    data = bytearray(file)
    if not data:
        # Nothing to mutate; also avoids randint(0, -1) on empty input.
        return data
    mutations = len(data) * factor
    # Use value comparison, not `is` identity, which is unreliable for
    # numbers (and always False for floats like 0.0).
    if mutations == 0 and factor != 0:
        mutations = 1
    while mutations > 0:
        random_position = random.randint(0, len(data) - 1)
        data[random_position] = random.randint(0, 255)
        mutations -= 1
    return data
|
663013b46c71858260d055080fd20f5e0cf495eb
| 701,180
|
import uuid
def uuid_uri(prefix):
    """Construct a URI by appending a freshly generated UUID1 to *prefix*."""
    identifier = uuid.uuid1()
    return prefix + str(identifier)
|
2361583122396296d40ff5b1c284662a918fc871
| 701,181
|
from typing import Any
def is_raw_json_null(py_obj: Any) -> bool:
    """
    Checks whether the given Python object represents a raw JSON null.

    A raw JSON null deserialises to Python's ``None`` — nothing else
    (not 0, not "", not False) qualifies.

    :param py_obj: The Python object to check.
    :return: True if the object is a raw JSON null, False otherwise.
    """
    return py_obj is None
|
1d206ed8242caeb806a646aa617a3bf6707fd5d8
| 701,182
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.