content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
def split_lines(source):
    """
    Split selection lines in a version-agnostic way.
    Python grammar only treats \r, \n, and \r\n as newlines.
    But splitlines() in Python 3 has a much larger list: for example, it also includes \v, \f.
    As such, this function will split lines across all Python versions.

    :param source: the text to split
    :return: list of lines; empty lines are preserved
    """
    # BUG FIX: the previous pattern `[\n\r]+` merged runs of newlines, so
    # empty lines were silently dropped. Splitting on each single line
    # terminator (\r\n first so it is not counted twice) preserves them.
    return re.split(r"\r\n|\r|\n", source)
def ema(df, window, targetcol='weightedAverage', colname='ema', **kwargs):
    """Add an exponential moving average of ``targetcol`` to ``df``.

    The result is stored in a new column ``colname`` and the (mutated)
    dataframe is returned. Extra keyword arguments ``min_periods``,
    ``adjust`` and ``ignore_na`` are forwarded to ``Series.ewm``.
    """
    ewm_options = {
        'span': window,
        'min_periods': kwargs.get('min_periods', 1),
        'adjust': kwargs.get('adjust', True),
        'ignore_na': kwargs.get('ignore_na', False),
    }
    df[colname] = df[targetcol].ewm(**ewm_options).mean()
    return df
def filter_fiona_metadata(dct):
    """Keep only the keyword arguments Fiona accepts when opening a feature collection."""
    valid_keys = {"crs", "driver", "schema", "layer", "vfs"}
    filtered = {}
    for key, value in dct.items():
        if key in valid_keys:
            filtered[key] = value
    return filtered
import json
def backup_luks2_token(module, device, token_id):
    """Export a LUKS2 token so its metadata can be restored later.
    Return: <backup> <error>"""
    cmd = ["cryptsetup", "token", "export", "--token-id", token_id, device]
    ret, token, err = module.run_command(cmd)
    if ret != 0:
        return None, {"msg": "Error during token backup: {0}".format(err)}
    try:
        # the exported token is JSON text; surface parse errors to the caller
        return json.loads(token), None
    except ValueError as exc:
        return None, {"msg": str(exc)}
def _geny(xx, func):
"""
Generates an array containing y-values corresponding to given x-coordinates
by using a function y = f(x).
Args:
xx (1darray): Array containing the x-values for which matching
y-values shall be computed.
func (callable object): The function used to generate the data.
Returns:
yy (1darray): Array containing y-values matching the supplied
x-coordinates.
"""
yy = func(xx)
return yy | 5de5ee7a83b76f51d85573099d710b06610e8c8e | 115,564 |
def isbool(s):
    """
    Check whether the string ``s`` represents a boolean.
    The string requires Python capitalization (e.g. 'True', not 'true').
    Numeric and boolean values themselves are accepted as well.

    :param s: the candidate to test
    :return: True if s is the string representation of a boolean
    :rtype: ``bool``
    """
    # numbers and booleans count as "boolean-like" by design
    if type(s) in (int, float, bool):
        return True
    if type(s) is not str:
        return False
    return s in ('True', 'False')
from typing import Dict
from typing import Any
def is_compatible(assignment1: Dict[str, Any], assignment2: Dict[str, Any]):
    """
    Check if two (potentially partial) assignments are compatible,
    i.e. they agree on every variable they both assign.

    :param assignment1: a dict var -> val
    :param assignment2: a dict var -> val
    :return: True if the assignments are compatible
    """
    shared_vars = assignment1.keys() & assignment2.keys()
    # vacuously compatible when no variable is shared
    return all(assignment1[var] == assignment2[var] for var in shared_vars)
def SetFieldValue(regValue, lsb, fsize, fieldValue):
    """
    An internal utility function to assign a field value to a register value.
    Performs a range check on fieldValue using fsize.

    :type regValue: ``int`` or ``long``
    :param regValue: The value of the register to parse
    :type lsb: ``int``
    :param lsb: The least significant bit of the field
    :type fsize: ``int``
    :param fsize: The size of the field in bits
    :type fieldValue: ``int``
    :param fieldValue: The new field value
    :rtype: ``int`` or ``long``
    :return: The new register value
    """
    # any bit at position >= fsize means the value does not fit the field
    if (-1 << fsize) & fieldValue:
        raise ValueError("field value '{}' exceeds range of {} bits".format(fieldValue,
                                                                            fsize))
    # mask covering bits [lsb, lsb + fsize)
    field_mask = ((1 << fsize) - 1) << lsb
    return (regValue & ~field_mask) | (fieldValue << lsb)
import re
def clean_regex_string(s: str) -> str:
    """Converts regex string pattern for a path into OpenAPI format.
    Example::
        >>s = '^toy\\/^(?P<toyId>[a-zA-Z0-9-_]+)\\/details'
        >>clean_regex_string(s)
        'toy/{toyId}/details'
    """
    # drop anchors and escaping backslashes first
    stripped = s.replace("^", "").replace("\\", "")
    # turn each named capture group into an OpenAPI {placeholder}
    named_group = r"\(\?P<([a-zA-Z0-9-_]*)>.*?\)"
    templated = re.sub(named_group, r"{\1}", stripped)
    return templated.replace("?", "").replace("$", "")
def check_nulls(df):
    """
    Return the per-column null counts of ``df`` sorted ascending.
    """
    null_counts = df.isnull().sum()
    return null_counts.sort_values()
import mimetypes
def get_mimetype(url):
    """
    Guess a mime-type based on the file extension of a url.

    Args:
        url (text): Web url that was linked to by a reddit submission.
    Returns:
        modified_url (text): The url (or filename) that will be used when
            constructing the command to run.
        content_type (text): The mime-type that will be used when
            constructing the command to run. If the mime-type is unknown,
            return None and the program will fallback to using the web
            browser.
    """
    # strip query string and fragment before guessing the type
    filename = url.split('?')[0].split('#')[0]
    content_type, _encoding = mimetypes.guess_type(filename)
    return url, content_type
import random
def generate_chunks(total, min_val, max_val, num_chunks=-1):
    """
    Randomly generate a list of integers l such that sum(l) = total and for each x in l, min_val <= x <= max_val.
    If num_chunks > 0, it is guaranteed that the list contains exactly num_chunks elements.
    """
    if num_chunks <= 0:
        # Greedy sampling: keep drawing chunk sizes until the total is exhausted.
        chunks = []
        while total > 0:
            next_chunk_size = random.randint(min(total, min_val), min(total, max_val))
            # Re-draw when the remainder would be positive but smaller than
            # min_val, since it could not form a valid final chunk.
            # NOTE(review): if the initial total is already < min_val, the single
            # chunk produced here is < min_val — confirm this is acceptable.
            if 0 < total - next_chunk_size < min_val:
                continue
            total -= next_chunk_size
            chunks.append(next_chunk_size)
        return chunks
    else:
        # Feasibility checks: num_chunks values in [min_val, max_val] must be
        # able to sum to total.
        if total < num_chunks * min_val:
            raise ValueError('Total ({}) must be >= num_chunks * min_val ({}*{})'.format(total, num_chunks, min_val))
        if total > num_chunks * max_val:
            raise ValueError('Total ({}) must be <= num_chunks * max_val ({}*{})'.format(total, num_chunks, max_val))
        # Give every chunk min_val up front; only the surplus is distributed.
        total -= num_chunks * min_val
        chunks = None
        # Rejection sampling: cut [0, total] at num_chunks - 1 random split
        # points ("stars and bars"); retry until no chunk exceeds max_val.
        while not chunks or any([x > max_val for x in chunks]):
            split_points = [0, total]
            for _ in range(num_chunks - 1):
                split_points.append(random.randint(0, total))
            split_points.sort()
            # Gap between consecutive split points plus the reserved min_val.
            chunks = [split_points[i + 1] - split_points[i] + min_val for i in range(len(split_points) - 1)]
        return chunks
from typing import Union
from typing import List
import torch
from typing import Tuple
def exact_match_rate(
    real_start: Union[List[List[torch.Tensor]], torch.Tensor],
    real_end: Union[List[List[torch.Tensor]], torch.Tensor],
    pred_start: torch.Tensor,
    pred_end: torch.Tensor
) -> Tuple[int, int, float]:
    """
    Compute the exact match rate of predicted answer spans against ground truth.

    Each of the N answers contributes two predictions (start and end token
    index). An answer where only one of the two is predicted correctly counts
    as a 50% match for that answer. Ground truth may come in two forms:

    - ``real_start``/``real_end`` as 1-D tensors of shape (N): one correct
      answer per question.
    - lists of N lists of 0-d tensors: multiple acceptable answers per
      question; a prediction is scored against its best-matching option.

    :param real_start: ground truth start token indices (tensor or list of lists).
    :param real_end: ground truth end token indices (tensor or list of lists).
    :param pred_start: predicted start indices, tensor of shape (N).
    :param pred_end: predicted end indices, tensor of shape (N).
    :return: (correct, total_indices, match_rate) where total_indices == 2*N
        and match_rate == correct / total_indices.
    """
    assert len(real_start) == len(real_end), "real_start and real_end shapes do not match."
    assert pred_start.shape == pred_end.shape, "pred_start and pred_end lengths do not match."
    assert len(real_start) == len(pred_start), \
        f"Datasets mismatch: {len(real_start)} correct labels and {len(pred_start)} predictions were provided."
    correct = 0
    total_indices = len(pred_start) + len(pred_end)
    for i, (pred_start_sample, pred_end_sample) in enumerate(zip(pred_start, pred_end)):
        gt_starts = real_start[i]
        gt_ends = real_end[i]
        # BUG FIX: when the ground truth is a 1-D tensor, indexing yields a
        # 0-d tensor which is not iterable; wrap it as a single-option list.
        if isinstance(gt_starts, torch.Tensor) and gt_starts.dim() == 0:
            gt_starts = [gt_starts]
            gt_ends = [gt_ends]
        # for each acceptable answer, count how many of (start, end) match;
        # the best option determines the score for this sample
        match_options = [
            int(pred_start_sample == gt_start) + int(pred_end_sample == gt_end)
            for gt_start, gt_end in zip(gt_starts, gt_ends)
        ]
        # BUG FIX: default=0 avoids a ValueError when a sample has no options
        correct += max(match_options, default=0)
    match_rate = correct / total_indices
    return correct, total_indices, match_rate
def allowed_file(filename):
    """Return `True` if the file extension is `.tar`."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ['tar']
import colorsys
def get_hue(image):
    """Return the most common hue (0-255) of the image.

    Greyscale and non-opaque pixels are ignored; assumes RGBA pixel data.
    """
    counts = [0] * 256
    for pixel in image.getdata():
        red, green, blue = pixel[:3]
        # skip greyscale pixels
        if red == green == blue:
            continue
        # skip pixels that are not fully opaque
        if pixel[3] < 255:
            continue
        hue = colorsys.rgb_to_hls(red / 255.0, green / 255.0, blue / 255.0)[0]
        counts[int(hue * 255)] += 1
    return counts.index(max(counts))
def split(s, delimiter=','):
    """Split a string on the given delimiter or on newlines, returning the
    pieces stripped of whitespace with empty entries dropped."""
    normalized = s.replace(delimiter, '\n')
    stripped = (piece.strip() for piece in normalized.splitlines())
    return [piece for piece in stripped if piece]
import pickle
def stop_idx_in_route(stop_times_file, FOLDER):
    """
    Build, pickle and return a lookup from (route id, stop id) to the stop's
    index within its route.

    Args:
        stop_times_file (pandas.dataframe): stop_times.txt file in GTFS.
        FOLDER (str): path to network folder.
    Returns:
        idx_by_route_stop (dict): {(route id, stop id): stop index in route}.
    """
    grouped = stop_times_file.groupby(["route_id", "stop_id"])
    idx_by_route_stop = {}
    for route_stop_pair, details in grouped:
        # first stop_sequence value of the group is the stop's index
        idx_by_route_stop[route_stop_pair] = details.stop_sequence.iloc[0]
    with open(f'./dict_builder/{FOLDER}/idx_by_route_stop.pkl', 'wb') as pickle_file:
        pickle.dump(idx_by_route_stop, pickle_file)
    print("idx_by_route_stop done")
    return idx_by_route_stop
def find_key_in_list(key, key_list):
    """
    Look for the best corresponding key in a list.

    ``key`` has to start with a given key from the list; the longest such
    key is returned.

    Parameters
    ----------
    key : str
        Given key.
    key_list : list of str
        Valid keys to be checked against.

    Returns
    -------
    found_key : :class:`str` or :class:`None`
        The best match. None if nothing was found.
    """
    matches = [candidate for candidate in key_list if key.startswith(candidate)]
    if not matches:
        return None
    longest = max(matches, key=len)
    # an empty-string candidate matches any key but is not a useful result
    return longest if longest else None
def get_suffix_base64(raw_base64):
    """
    Split a data URI into its base64 payload and file suffix.

    :param raw_base64: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA
    :return: ('iVBORw0KGgoAAAANSUhEUgAA', 'png') i.e. (code, suffix)
    """
    header, code = raw_base64.split(',', 1)
    # header looks like 'data:image/png;base64'
    mime = header.split(';')[0].split('data:')[1]
    suffix = mime.split('/')[1]
    return code, suffix
from typing import List
def make_squares(arr: List[int]) -> List[int]:
    """
    Return a sorted array of the squares of the (sorted) input array.

    Uses two pointers from both ends, filling the result from the back with
    the larger square.

    Time Complexity: O(len(arr))
    Space Complexity: O(len(arr))
    :param arr: input array, sorted ascending (may contain negatives)
    :return: new array containing squares of all the numbers in sorted order
    """
    n = len(arr)
    p1 = 0
    p2 = n - 1
    idx = p2
    squares = [0] * n
    # BUG FIX: the loop must run while p1 <= p2; with a strict '<' the final
    # remaining element was never written, leaving squares[0] == 0.
    while p1 <= p2:
        left_square = arr[p1] * arr[p1]
        right_square = arr[p2] * arr[p2]
        if left_square < right_square:
            squares[idx] = right_square
            p2 -= 1
        else:
            squares[idx] = left_square
            p1 += 1
        idx -= 1
    return squares
def read_taxonomies_to_design_for(fn):
    """Read different taxonomies to design for.

    The columns must be, in order:
      1) a taxonomic (e.g., species) ID from NCBI
      2) a segment label, or 'None' if unsegmented

    Args:
        fn: path to TSV file, where each row corresponds to a taxonomy
    Returns:
        collection of (taxonomic-id, segment)
    """
    taxs = []
    with open(fn) as fh:
        for line in fh:
            fields = line.rstrip().split('\t')
            if len(fields) != 2:
                raise Exception(("Input fasta TSV must have 2 columns"))
            try:
                tax_id = int(fields[0])
            except ValueError:
                raise Exception(("Taxonomy ID '%s' must be an integer") %
                    fields[0])
            # a literal 'none' (any case) means the taxon is unsegmented
            segment = None if fields[1].lower() == 'none' else fields[1]
            taxs.append((tax_id, segment))
    return taxs
import re
def identify_hypers(df_fit):
    """Identify hyperparameter columns.

    Arguments:
        df_fit: A pd.DataFrame of model fit data.
    Returns:
        hypers: A list of column-name strings matching the 'hyp_*.' pattern.
    """
    pattern = re.compile(r'hyp_*.')
    return [name for name in list(df_fit.columns) if pattern.match(name)]
import binascii
def _pack_mac(mac_address):
"""Pack a mac_address into binary."""
return binascii.unhexlify(mac_address.replace(':', '')) | 40e9edfdb9a134c95a333bd3ece8af5671c6454b | 115,619 |
from datetime import datetime
def monthlyVector(startyr, endyr, startmth, endmth):
    """
    Return two datetime vectors with start and end dates at the start and end
    of each month from (startyr, startmth) through (endyr, endmth) inclusive.

    The "end" of a month is represented by the first day of the following
    month, so the two returned lists are aligned month intervals.
    """
    month = []
    year = []
    m0 = startmth
    y0 = startyr
    # BUG FIX: the previous condition `m0 <= endmth or y0 <= endyr` kept
    # looping well past the requested end (e.g. Jan-Mar of the end year plus
    # a whole extra year). Compare (year, month) pairs instead.
    while y0 < endyr or (y0 == endyr and m0 <= endmth):
        month.append(m0)
        year.append(y0)
        if m0 == 12:
            m0 = 1
            y0 += 1
        else:
            m0 += 1
    # one extra month-start so the final month's end boundary is included
    month.append(m0)
    year.append(y0)
    time = [datetime(yy, mm, 1) for mm, yy in zip(month, year)]
    return time[0:-1], time[1:]
from typing import List
def make_low_rcp_list(predicted_temps_dict: dict) -> List[float]:
    """Return the list of RCP 2.6 temperature values, one per year,
    in the dictionary's iteration order."""
    return [yearly['RCP 2.6'] for yearly in predicted_temps_dict.values()]
def is_dunder(val: str) -> bool:
    """Return True for a __dunder__ name, False otherwise."""
    if len(val) <= 4:
        return False
    if not (val.startswith('__') and val.endswith('__')):
        return False
    # reject names like ___x___ whose inner part starts/ends with '_'
    return val[2] != '_' and val[-3] != '_'
import tempfile
def build_ansible_inventory(config=None, fw_ip=None):
    """
    Build a basic ini-style Ansible inventory file.

    Parameters:
        config (dict): Optional dictionary representing the current configuration file (used in production mode).
        fw_ip (string): Optional firewall IP address (used in test mode).
    Returns:
        file object: File descriptor of the (closed) file containing the inventory.
    """
    inv = tempfile.NamedTemporaryFile(mode="w", delete=False)
    if fw_ip:
        # test mode: a single firewall address
        inv.write(fw_ip + "\n")
    else:
        production = (config or {}).get("production", {})
        if "firewalls" not in production:
            raise Exception("The configuration must include a production.firewalls section")
        for firewall in production["firewalls"]:
            inv.write(firewall + "\n")
    inv.close()
    return inv
import re
def replace(s, pattern, replacement):
    """Replace occurrences of a match string in a given string and return the
    new string. The match string can be a regex expression.

    Args:
        s (str): the string to modify
        pattern (str): the search expression
        replacement (str): the string to replace each match with
    """
    # The replacement string may contain invalid backreferences (like \1 or
    # \g) which would make re.sub blow up, and the jam version this emulates
    # never supported backreferences anyway. Passing a callable makes re.sub
    # insert the replacement text verbatim, sidestepping escape processing.
    return re.sub(pattern, lambda match: replacement, s)
def populate_table(conn, sql, rows):
    """Insert multiple rows of data into a table.

    :param conn: Connection object
    :param sql: INSERT INTO statement
    :param rows: the data as a list of rows
    :return: lastrowid
    """
    cursor = conn.cursor()
    cursor.executemany(sql, rows)
    conn.commit()
    return cursor.lastrowid
def is_solved(neighbours, colors):
    """Check, for all countries, that every neighbour has a different color.

    :param neighbours: dict mapping country -> list of neighbouring countries
    :param colors: indexable of colors; colors[country] is that country's color
    :return: Boolean
    """
    # an unassigned country or a size mismatch means the map is not solved
    if None in colors:
        return False
    if len(neighbours) != len(colors):
        return False
    return all(
        colors[neighbour] != colors[country]
        for country in neighbours
        for neighbour in neighbours[country]
    )
async def is_record(value):
    """
    Check whether a value is an Airtable Record ID or a list of Record IDs.

    Args:
        value (``obj``): any value retrieved from an airtable record field.
    Returns:
        (``bool``): True if value is a Record ID or a non-empty list of them
    """
    # a non-empty list is judged by its first element
    candidate = value[0] if isinstance(value, list) and value else value
    return (isinstance(candidate, str)
            and candidate.startswith("rec")
            and len(candidate) == 17)
def TransToRp(T):
    """Split a homogeneous transformation matrix into a rotation matrix
    and a position vector.

    :param T: A 4x4 homogeneous transformation matrix
    :return R: The corresponding 3x3 rotation matrix
    :return p: The corresponding 3-vector position

    Example Input:
        T = np.array([[1, 0, 0, 0],
                      [0, 0, -1, 0],
                      [0, 1, 0, 3],
                      [0, 0, 0, 1]])
    Output:
        (np.array([[1, 0, 0],
                   [0, 0, -1],
                   [0, 1, 0]]),
         np.array([0, 0, 3]))
    """
    rotation = T[0: 3, 0: 3]
    position = T[0: 3, 3]
    return rotation, position
def host_available(compute_host):
    """:returns: `True` if the compute host is up and enabled in nova,
    `False` otherwise"""
    is_up = compute_host.state == 'up'
    is_enabled = compute_host.status == 'enabled'
    return is_up and is_enabled
def recurPower(base, exp):
    """
    Compute base**exp recursively.

    base: int or float.
    exp: int >= 0
    returns: int or float, base^exp
    """
    return 1 if exp == 0 else base * recurPower(base, exp - 1)
import base64
def b64_decode(data):
    """Decode a standard base64 string that may lack '=' padding."""
    remainder = len(data) % 4
    if remainder:
        # restore the padding stripped by the encoder
        data = data + '=' * (4 - remainder)
    return base64.b64decode(data)
import json
def load_features(features_path):
    """
    Read the features from disk.

    :param features_path: Location of feature JSON.
    :return features: Feature hash table mapping str keys to lists of str values.
    """
    # BUG FIX: use a context manager so the file handle is closed promptly
    # instead of being leaked to the garbage collector.
    with open(features_path) as handle:
        features = json.load(handle)
    return {str(k): [str(val) for val in v] for k, v in features.items()}
import math
def factor(n):
    """Simple factorization function.

    Returns the non-trivial factors of ``n`` (excluding 1 and n itself),
    pairing each small factor i with its cofactor n // i.

    BUG FIXES vs. the previous version: integer floor division is used
    instead of ``/`` (which produced floats in Python 3), and the square
    root of a perfect square is no longer appended twice.
    """
    factors = []
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            factors.append(i)
            cofactor = n // i
            if cofactor != i:
                factors.append(cofactor)
    return factors
import torch
import math
def bucket_values(
    distances: torch.Tensor, num_identity_buckets: int = 4, num_total_buckets: int = 10
) -> torch.Tensor:
    """
    Places the given values (designed for distances) into `num_total_buckets`semi-logscale
    buckets, with `num_identity_buckets` of these capturing single values.
    The default settings will bucket values into the following buckets:
    [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
    # Parameters
    distances : `torch.Tensor`, required.
        A Tensor of any size, to be bucketed. Assumed to hold non-negative
        values (NOTE(review): negative inputs would make the log NaN — confirm
        callers never pass them).
    num_identity_buckets: `int`, optional (default = `4`).
        The number of identity buckets (those only holding a single value).
    num_total_buckets : `int`, (default = `10`)
        The total number of buckets to bucket values into.
    # Returns
    `torch.Tensor`
        A tensor of the same shape as the input, containing the indices of the buckets
        the values were placed in.
    """
    # Chunk the values into semi-logscale buckets using .floor().
    # This is a semi-logscale bucketing because we divide by log(2) after taking the log.
    # We do this to make the buckets more granular in the initial range, where we expect
    # most values to fall. We then add (num_identity_buckets - 1) because we want these indices
    # to start _after_ the fixed number of buckets which we specified would only hold single values.
    logspace_index = (distances.float().log() / math.log(2)).floor().long() + (
        num_identity_buckets - 1
    )
    # create a mask for values which will go into single number buckets (i.e not a range).
    use_identity_mask = (distances <= num_identity_buckets).long()
    use_buckets_mask = 1 + (-1 * use_identity_mask)
    # Use the original values if they are less than num_identity_buckets, otherwise
    # use the logspace indices. For a distance of 0 the log above is -inf, but the
    # identity mask zeroes out that (garbage) logspace term, so the result is safe.
    combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
    # Clamp to put anything > num_total_buckets into the final bucket.
    return combined_index.clamp(0, num_total_buckets - 1)
def _indexing(x, indices):
"""
:param x: array from which indices has to be fetched
:param indices: indices to be fetched
:return: sub-array from given array and indices
"""
# np array indexing
if hasattr(x, 'shape'):
return x[indices]
# list indexing
return [x[idx] for idx in indices] | 8061aee1464ffef0bc13eab0c59b472fd5dc140c | 115,670 |
def strtime(time):
    """Format a number of seconds as a duration string.

    Years and days only appear when non-zero; hours, minutes and seconds
    are always shown.
    """
    years = days = hours = mins = 0
    if time > 31536000:
        years = int(time / 31536000)
        time -= 31536000 * years
    if time > 86400:
        days = int(time / 86400)
        time -= 86400 * days
    if time > 3600:
        hours = int(time / 3600)
        time -= 3600 * hours
    if time > 60:
        mins = int(time / 60)
        time -= 60 * mins
    secs = int(time)
    if years > 0:
        return f"{years}y {days}d {hours:02d}h {mins:02d}min {secs:02d}s"
    if days > 0:
        return f"{days}d {hours:02d}h {mins:02d}min {secs:02d}s"
    return f"{hours:02d}h {mins:02d}min {secs:02d}s"
def gen(x):
    """
    Return the generator of ``x``.

    EXAMPLES::

        sage: R.<x> = QQ[]; R
        Univariate Polynomial Ring in x over Rational Field
        sage: gen(R)
        x
        sage: gen(GF(7))
        1
        sage: A = AbelianGroup(1, [23])
        sage: gen(A)
        f
    """
    # delegate to the object's own gen() method
    return x.gen()
def expToTrain(exp, start=None):
    """Wrap each domain value (column 0, from row ``start`` onward) in its own
    list, because each domain value must be iterable for training data."""
    return [[value] for value in exp[start:, 0]]
def calculate_segment_name(host_name, recorder):
    """
    Return the segment name based on the recorder configuration and the
    input host name. This is a helper generally used in web framework
    middleware where a host name is available from incoming request headers.
    """
    naming = recorder.dynamic_naming
    if naming:
        return naming.get_name(host_name)
    # no dynamic naming configured: fall back to the static service name
    return recorder.service
def stations_by_river(stations):
    """Group station objects by river.

    Takes a list of station objects and returns a dictionary of format
    {river: list of stations on this river}."""
    rivers = {}
    for station in stations:
        # create the river's list on first sight, then append
        rivers.setdefault(station.river, []).append(station)
    return rivers
def get_status(i2c_hat):
    """Get the I2C-HAT status word. The exported robotframework keyword is 'Get Status'.

    Args:
        i2c_hat (I2CHat): board
    Returns:
        int: The status word
    """
    status_word = i2c_hat.status.value
    return status_word
def popcount(x):
    """
    count the number of high bits in the integer `x`.
    Taken from https://en.wikipedia.org/wiki/Hamming_weight

    NOTE(review): the masks are 64-bit wide, so this assumes x fits in
    64 bits — results for larger integers are not meaningful; confirm
    callers respect that.
    """
    # put count of each 2 bits into those 2 bits
    x -= (x >> 1) & 0x5555555555555555
    # put count of each 4 bits into those 4 bits
    x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
    # put count of each 8 bits into those 8 bits
    x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
    x += x >> 8  # put count of each 16 bits into their lowest 8 bits
    x += x >> 16  # put count of each 32 bits into their lowest 8 bits
    x += x >> 32  # put count of each 64 bits into their lowest 8 bits
    # the final count is at most 64, so 7 bits (0x7f) are enough to hold it
    return x & 0x7f
import time
import torch
def record_ddp_fit_model_stats(trainer, model, use_cuda):
    """Measure wall-clock time for trainer.fit plus max allocated CUDA memory.

    Args:
        trainer: The trainer object.
        model: The model to fit.
        use_cuda: Whether to sync CUDA kernels and record peak memory.
    Returns:
        (max_memory_MiB or None, total wall clock time in seconds).
    """
    peak_memory = None
    started_at = time.perf_counter()
    if use_cuda:
        # reset counters and drain pending kernels before timing the fit
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.synchronize()
    trainer.fit(model)
    if use_cuda:
        torch.cuda.synchronize()
        peak_memory = torch.cuda.max_memory_allocated() / 2 ** 20
    elapsed = time.perf_counter() - started_at
    return peak_memory, elapsed
def get_transfer_indicator(filename, transferred, total, spaces=50):
    """Return a progress indicator string for a byte stream transfer."""
    if not total:
        # unknown size: no bar can be drawn
        return "{}, ?".format(filename)
    done = min(transferred, total)
    filled = int(spaces * done / total)
    bar = "·" * filled + " " * (spaces - filled - 1)
    return "{}, [{}] {}kB".format(filename, bar, done // 1024)
def fix_lon(lon):
    """
    Normalize a longitude into the half-open interval [-180, 180).

    :returns: a valid longitude in the range -180 <= lon < 180
    >>> fix_lon(11)
    11
    >>> fix_lon(181)
    -179
    >>> fix_lon(-182)
    178
    """
    shifted = (lon + 180) % 360
    return shifted - 180
import logging
def load_files_serial(feature_files, load_function, **kwargs):
    """
    Load the feature files one after another.

    :param feature_files: The collection of feature files to load.
    :param load_function: The function to use for loading the feature files.
    :param kwargs: keyword arguments for the load function.
    :return: A list of loaded feature data frames or numpy arrays.
    """
    logging.info("Reading files serially")
    loaded = []
    for files in feature_files:
        loaded.append(load_function(files, **kwargs))
    return loaded
def _extract_one_stim_dur(stim_durs):
"""
In case of multiple photostim durations - pick the shortest duration
In case of no photostim durations - return the default of 0.5s
"""
default_stim_dur = 0.5
if len(stim_durs) == 0:
return default_stim_dur
elif len(stim_durs) > 1:
print(f'Found multiple stim durations: {stim_durs} - select {min(stim_durs)}')
return float(min(stim_durs))
else:
return float(stim_durs[0]) if len(stim_durs) == 1 and stim_durs[0] else default_stim_dur | d09ee8e83420d5c0fce17d3ab09459ba65cffd93 | 115,708 |
def read_images(cursor):
    """Return the ID, recorded datetime and image content of all Testbed images,
    ordered by ascending ID."""
    query = ('SELECT id, recorded, testbed_image FROM observations WHERE '
             'testbed_image IS NOT NULL ORDER BY id ASC')
    cursor.execute(query)
    return cursor.fetchall()
def is_url(output_location: str) -> bool:
    """Return True if the output location is an http(s) URL."""
    url_prefixes = ('http://', 'https://')
    return output_location.startswith(url_prefixes)
def get_rgb_channel(image, channel):
    """Extract a single colour-channel plane from an RGB image array."""
    channel_index = channel.value
    return image[:, :, channel_index]
def is_valid_key(key: str) -> bool:
    """Check if an exiftool key is valid and interesting."""
    # https://exiftool.org/TagNames/Extra.html
    file_keys = (
        'FileName', 'Directory', 'FileSize', 'FileModifyDate', 'FileAccessDate',
        'FileInodeChangeDate', 'FilePermissions', 'FileType', 'FileType',
        'FileTypeExtension', 'MIMEType', 'ExifByteOrder'
    )
    invalid_prefixes = ("ExifTool", "System", "SourceFile") + tuple(
        f"File:{k}" for k in file_keys
    )
    # str.startswith accepts a tuple, checking all prefixes in one call
    return not key.startswith(invalid_prefixes)
def deduplicate_list_of_periods(periods):
    """
    Deduplicate a list of ``Period`` objects, ensuring no two ``Period``s
    share the same ``id``. The first occurrence of each id is kept.
    """
    seen_ids = []
    unique_periods = []
    for period in periods:
        if period.id in seen_ids:
            continue
        seen_ids.append(period.id)
        unique_periods.append(period)
    return unique_periods
def get_input_shapes_map(input_tensors):
    """Get a map of input names to shapes.

    Args:
        input_tensors: List of input tensor tuples `(name, shape, type)`.
    Returns:
        {string : list of integers}. Entries without a usable shape are omitted.
    """
    input_shapes = {}
    for name, shape, _ in input_tensors:
        if not shape:
            continue
        dims = [dim.value for dim in shape.dims]
        # skip tensors whose shape resolves to no dimensions
        if dims:
            input_shapes[name] = dims
    return input_shapes
def load_moves(filename):
    """Load a list of moves from a file, stripping whitespace and
    skipping blank lines."""
    with open(filename) as move_file:
        stripped = (line.strip() for line in move_file)
        return [move for move in stripped if move]
def _zbc_to_hgvs(i):
"""Convert zero-based coordinate to hgvs (1 based, missing zero)"""
if i >= 0:
i += 1
return i | 6741ec36e95ff9c76cdc0c91ffc4eae91e85686a | 115,737 |
def decode(state):
    """
    Decode the taxi env state (int in [0, 500)) into its
    (taxirow, taxicol, passloc, destidx) components.

    :param state: int in [0, 500)
    :return: (taxirow, taxicol, passloc, destidx) tuple
    """
    # Peel the mixed-radix encoding off least-significant factor first.
    state, destidx = divmod(state, 4)
    state, passloc = divmod(state, 5)
    taxirow, taxicol = divmod(state, 5)
    assert 0 <= taxirow < 5
    return taxirow, taxicol, passloc, destidx
def _has_text(string: str) -> bool:
"""Returns True if a string has non whitespace text."""
string_without_whitespace = string.strip()
return len(string_without_whitespace) > 0 | e3bc86b79c4eec0c5e69836723ca091148dc221f | 115,751 |
import base64
def deserialise_job_command(command):
    """Deserialise a base64-encoded job command received through the REST API."""
    decoded_bytes = base64.b64decode(command)
    return decoded_bytes.decode("utf-8")
def extract(seq, f):
    """
    Extract items from a possibly nested sequence.

    All items of the possibly nested sequence ``seq`` for which the
    callback ``f`` returns ``True`` are returned in a list; rejected
    iterable items are recursed into. Non-iterable input yields ``[]``.

    Fix: a rejected string of length <= 1 iterates to itself, which
    previously caused infinite recursion; such items are now skipped.
    """
    try:
        it = iter(seq)
    except TypeError:
        return []
    items = []
    for item in it:
        if f(item):
            items.append(item)
        elif isinstance(item, (str, bytes)) and len(item) <= 1:
            # A 1-char string iterates to itself: recursing never terminates.
            continue
        else:
            items.extend(extract(item, f))
    return items
import random
def getRescind() -> str:
    """Pick a random message ending for rescinding a user.

    Returns:
        One randomly chosen rescind message string.
    """
    endings = (
        "YOU ARE JUST BEGGING FOR RESCINDITAS, AREN'T YOU?",
        "swiggity swind, catch this rescind!",
        "rescinded, enjoy your time at Yale",
        "I hope you like gothic architecture. RESCINDITAS",
        "rub my foot in the next 5 seconds or you're rescinded. TOO LATE",
        "you get off easy, this time. BAHAHAHA JK. RESCINDED. Did y'all see the look on their face?",
        "rescinded.",
        "if this message gets at least one like, you're rescinded.",
        "I just got off the phone with Brian. He says he's looking forward to taking classes with you at Yale",
        "I love you.",
        "FETCH ME THE BREASTPLATE STRETCH... whoops wrong bot.",
        "You and Kyle Kashuv have something in common now",
    )
    return random.choice(endings)
def get_word_from_derewo_line(line):
    """Processor for an individual line from a DeReWo text file.

    Args:
        line: Single line from the DeReWo text file
            (expected form: "<word> <frequency-class>").

    Returns:
        Lowercase word, or None if the line is empty (fix: previously
        raised IndexError) or the word itself contains whitespace.
    """
    parts = line.split()
    # skip blank lines and words with embedded whitespace
    if not parts or len(parts) > 2:
        return None
    # disregard the DeReWo frequency integer; only the word is needed.
    # lowercase passwords are good enough given sufficient word list length.
    return parts[0].lower()
def dotND(v1, v2):
    """Return the dot product of two n-dimensional vectors
    (itemwise multiplication followed by summation)."""
    total = 0
    for a, b in zip(v1, v2):
        total += a * b
    return total
def experiment_type_from_filename(filename):
    """
    Derive the experiment type from a summary-file name.

    The filename is assumed to be of the form
    '<some_path>/measurements_<experiment_type>_summary.csv'.

    :param filename: file name as a string, full or relative path.
    :return: the experiment type string.

    Example:
        experiment_type_from_filename(
            './measurements_interprocess_best_effort_tcp_summary.csv'
        ) -> 'interprocess_best_effort_tcp'
    """
    basename = filename.split('/')[-1]
    stem = basename.split('.')[-2]
    # drop the leading 'measurements' and trailing 'summary' tokens
    middle_tokens = stem.split('_')[1:-1]
    return '_'.join(middle_tokens)
import re
def _collapse_impacted_interfaces(text):
    """Removes impacted interfaces details, leaving just the summary count.

    Matches a summary line like "<indent>N impacted interfaces:" together
    with every following line indented at least one space deeper, and
    replaces the whole span with only the summary line (colon dropped).
    """
    return re.sub(
        # group 1: the summary line's indentation; group 2: the count text.
        # "(?:^\1 .*\n)*" consumes the detail lines indented deeper than it.
        r"^( *)([^ ]* impacted interfaces?):\n(?:^\1 .*\n)*",
        r"\1\2\n",
        text,
        flags=re.MULTILINE)
def get_groups_per_chain(chains, ligands, waters):
    """Takes lists of chains, ligands and waters, and returns the chain counts
    that should go in the .mmtf file.

    :param list chains: the chains to pack.
    :param list ligands: the ligands to pack.
    :param list waters: the waters to pack.
    :rtype: ``list``"""
    # One group per residue for each polymer chain.
    groups_per_chain = [len(chain.residues()) for chain in chains]
    # Each ligand is its own single-group chain.
    groups_per_chain.extend([1] * len(ligands))
    # Count waters per internal chain id in one pass
    # (was O(len(waters) * number_of_water_chains)).
    water_counts = {}
    for water in waters:
        water_counts[water._internal_id] = water_counts.get(water._internal_id, 0) + 1
    for wc in sorted(water_counts):
        groups_per_chain.append(water_counts[wc])
    return groups_per_chain
from typing import Dict
def is_grpc_enabled(server_config: Dict) -> bool:
    """
    Report whether the API has the grpc protocol enabled (cortex.yaml).

    Args:
        server_config: API configuration; must contain a "protobuf_path" key.
    """
    protobuf_path = server_config["protobuf_path"]
    return protobuf_path is not None
def get_matched_rule(command, rules, settings):
    """Return the first rule matching the command, or None if none match."""
    return next(
        (rule for rule in rules if rule.match(command, settings)),
        None,
    )
def s2m(seconds: float) -> float:
    """Convert seconds to minutes, rounded to one decimal place."""
    minutes = seconds / 60
    return round(minutes, 1)
import re
def process_field_cancelled_value(value):
    """Return True when the <value> tag under the parent <field> tag marks
    the field as cancelled ('CANCELED' or 'CANCELLED', any case).

    Note: the original regex r'cancel[led$]*' was a character class repeated
    zero or more times (with '$' a literal inside the class), so it matched
    exactly when the substring 'cancel' appeared; a plain substring test is
    equivalent and clearer.
    """
    return 'cancel' in value.strip().lower()
def get_expiry_date(obj):
    """
    Custom admin field showing the expiry date in the Moderation Request
    Admin Changelist (djangocms-moderation).

    :param obj: a Moderation Request object supplied by the admin table row
    :return: the expiry date of the request's version, or None when the
        version has no ``contentexpiry`` attribute
    """
    version = obj.moderation_request.version
    if not hasattr(version, "contentexpiry"):
        return None
    return version.contentexpiry.expires
def get_lists_diff(py_list1, py_list2):
    """
    Return the elements of py_list1 that do not appear in py_list2
    (set semantics: duplicates collapsed, order not guaranteed).
    """
    return list(set(py_list1).difference(py_list2))
def pad_left(orig, pad, new_length):
    """
    Pad a string on the left side.

    Returns a padded version of the original string.

    Caveat: the pad string must be exactly 1 character. If the supplied pad
    is any other length, or the original already meets new_length, the
    original string is returned unchanged.
    """
    # str.rjust is the stdlib left-pad; guard first so a multi-char or empty
    # pad preserves the original lenient behaviour instead of raising.
    if len(pad) != 1 or len(orig) >= new_length:
        return orig
    return orig.rjust(new_length, pad)
def center(u):
    """Return the midpoints between consecutive edge values of *u*."""
    return (u[:-1] + u[1:]) * 0.5
def is_valid_uuid (uuid):
    """
    is_valid_uuid (uuid) -> bool

    returns True if uuid is a valid 128-bit UUID.

    valid UUIDs are always strings taking one of the following forms:
        XXXX
        XXXXXXXX
        XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
    where each X is a hexadecimal digit (case insensitive)

    Fix: every character is now checked individually, so strings that
    int(x, 16) tolerated ('+'/'-' signs, surrounding whitespace, '0x'
    prefixes) are rejected, matching the documented format.
    """
    def is_hex (part, length):
        # strict per-character check against the documented "X is a hex digit"
        return len (part) == length and all (
            c in "0123456789abcdefABCDEF" for c in part)

    try:
        if len (uuid) in (4, 8):
            return is_hex (uuid, len (uuid))
        if len (uuid) == 36:
            pieces = uuid.split ("-")
            lengths = (8, 4, 4, 4, 12)
            return len (pieces) == 5 and all (
                is_hex (p, n) for p, n in zip (pieces, lengths))
        return False
    except TypeError:
        # non-sized / non-string input (e.g. None, int)
        return False
def position_to_index(v, nelx, nely):
    """
    Convert a position vector to the index of the element containing it.

    NOTE(review): nely is unused; kept for interface compatibility.
    """
    col = int(v[0])
    row = int(v[1])
    return col + row * nelx
def fdr(ps, alpha=0.05):
    """Given a list of p-values and a desired significance alpha, find the
    adjusted q-value such that a p-value less than q is expected to be
    significant at alpha, via the Benjamini-Hochberg method
    (appropriate for independent tests). Returns None when no p-value
    passes its threshold.
    """
    ps = sorted(ps)
    m = len(ps)
    # k+1 because p-values are 1-indexed in the BH procedure.
    ks = [k for k in range(m) if ps[k] <= (k + 1) / float(m) * alpha]
    if not ks:
        return None
    # Fix: the original used `ps[K] if K else None`, which wrongly returned
    # None when the largest passing index was 0 (only the smallest p passes).
    return ps[max(ks)]
import sympy
def expr_to_json(expr):
    """
    Convert a Sympy expression into a JSON-compatible tree structure.
    Raises NotImplementedError for unsupported expression types.
    """
    def _children(e):
        return [expr_to_json(arg) for arg in e.args]

    # isinstance order matters: sympy numeric classes are checked after the
    # plain Python float/int case, mirroring the original dispatch.
    if isinstance(expr, sympy.Mul):
        return {"type": "Mul", "args": _children(expr)}
    if isinstance(expr, sympy.Add):
        return {"type": "Add", "args": _children(expr)}
    if isinstance(expr, sympy.Symbol):
        return {"type": "Symbol", "name": expr.name}
    if isinstance(expr, sympy.Pow):
        return {"type": "Pow", "args": _children(expr)}
    if isinstance(expr, (float, int)):
        return {"type": "Number", "value": expr}
    if isinstance(expr, sympy.Float):
        return {"type": "Number", "value": float(expr)}
    if isinstance(expr, sympy.Integer):
        return {"type": "Number", "value": int(expr)}
    raise NotImplementedError("Type not implemented: " + str(type(expr)))
import torch
from typing import Tuple
def _extract_sizes(
h: torch.Tensor,
r: torch.Tensor,
t: torch.Tensor,
) -> Tuple[int, int, int, int, int]:
"""Extract size dimensions from head/relation/tail representations."""
num_heads, num_relations, num_tails = [xx.shape[i] for i, xx in enumerate((h, r, t), start=1)]
d_e = h.shape[-1]
d_r = r.shape[-1]
return num_heads, num_relations, num_tails, d_e, d_r | bb94c5d8ddb1d61aa64bc83e143084239a83b92c | 115,827 |
def get_false_times(obs_pair_hash, variable):
    """
    Get the timepoints where the variable is false.

    Parameters:
        obs_pair_hash: dictionary of observations keyed by time unit, each
            value a list of (case, variable) tuples.
        variable: the observation to look for.

    Returns:
        A list of all timestamps whose observation list does not contain
        the variable (insertion order of the dict is preserved).
    """
    # any() short-circuits on the first match; iterating .items() avoids a
    # second dict lookup per key.
    return [
        key
        for key, observations in obs_pair_hash.items()
        if not any(item[1] == variable for item in observations)
    ]
import decimal
def big_int_cube_root(x):
    """Return the cube root of the given number as a Decimal.

    Works with integers that would cause OverflowErrors when computing the
    cube root the straightforward way (x ** (1/3)). The working precision
    is chosen from the digit count of x so that cubing and rounding the
    result reproduces the original number (no rigorous proof, but reliable
    in practice).
    """
    with decimal.localcontext() as ctx:
        # Guesstimate of the precision needed for a faithful result.
        ctx.prec = len(str(x)) // 3 + 4
        one_third = decimal.Decimal(1) / decimal.Decimal(3)
        return decimal.Decimal(x) ** one_third
def problem_2_5(node):
    """ Given a circular linked list, return the node at the beginning of
    the loop, or None if the list has no loop.

    DEFINITION
    Circular linked list: a (corrupt) linked list in which a node's next
    pointer points to an earlier node, making a loop.

    EXAMPLE
    input: A -> B -> C -> D -> E -> C [the same C as earlier]
    output: C

    SOLUTION (Floyd's cycle detection):
    - advance a slow pointer one node and a fast pointer two nodes at a time;
    - if they meet, the list has a loop (no meeting => no loop);
    - the meeting point is k nodes into the loop, where k is the distance
      from the head to the loop start;
    - reset slow to the head and advance both one node at a time; where they
      meet is the loop start.

    Fix: the original dereferenced n2.next.next without checking for None,
    so it crashed (AttributeError) on loop-free lists instead of reaching
    its own "no loop -> return None" path. Identity (`is`) is used instead
    of `==` so custom equality on nodes cannot confuse the detection.
    """
    slow = fast = node
    found = False
    # Advance until the pointers meet (loop) or fast runs off the end.
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            found = True
            break
    if not found:
        return None
    # Find the starting point of the loop: fast stays at the meeting point.
    slow = node
    while slow is not fast:
        slow = slow.next
        fast = fast.next
    return slow
import hashlib
def _sha512(data):
"""
Creates the string of sha512 hashed to the passed in data.
Args:
data (bytes): The data to be hashed
Returns:
type: str
The sha512 hashed data in string of hex values.
"""
return hashlib.sha512(data).hexdigest() | fdff68af99a479bb3bd463636878f90c44b8dd0a | 115,842 |
def show_most_common_entities(entity_dict, n=5):
    """Format information on entity counts into a string.

    Args:
        entity_dict (dictionary): entities and their counts, keyed by
            entity type.
        n (int, optional): only the n most common occurrences per type are
            shown. Defaults to 5.

    Return:
        string: formatted output string.
    """
    out_s = ''
    for ent_type, ents in entity_dict.items():
        out_s += '\t{}:\n'.format(ent_type)
        # Sort once and slice: the original break-inside-loop emitted n+1
        # entries because it checked the index after writing.
        top = sorted(ents.items(), key=lambda item: item[1], reverse=True)[:n]
        for idx, (ent, count) in enumerate(top):
            out_s += '\t\t{}:\t{} ({})\n'.format(idx + 1, ent, count)
        out_s += '\n'
    return out_s
def html_movie_embed_qt(moviefile, width=400, height=400):
    """Return an HTML snippet embedding a moviefile via QuickTime."""
    template = """
    <object classid="clsid:02BF25D5-8C17-4B23-BC80-D3488ABDDC6B"
    codebase="https://www.apple.com/qtactivex/qtplugin.cab"
    width="{width}" height="{height}" >
    <param name="src" value="{moviefile}" >
    <param name="autoplay" value="false" >
    <embed src="{moviefile}"
    pluginspage="https://www.apple.com/quicktime/download"
    width="{width}" height="{height}" autoplay="false">
    </embed>
    </object>
    """
    return template.format(moviefile=moviefile, width=width, height=height)
def get_number_digits(number: int) -> int:
    """Return how many characters the decimal representation of a number has.

    Parameters
    ----------
    number : int
        The number.

    Returns
    -------
    int
        The digit count. NOTE(review): a leading minus sign is counted for
        negative inputs (len(str(-5)) == 2) — preserved behaviour.
    """
    decimal_repr = str(number)
    return len(decimal_repr)
import re
def process(text):
    """
    Remove the leading " * " prefixes from a Purpose, Returns, or Throws
    multiline block of text that we've just pulled out of the function header.
    Convert <text> to `text` (Markdown code).
    """
    # Strip " * " plus following whitespace, keeping the first non-space char.
    text = re.sub(r'^ \*[ \t]+(\S)', r'\1', text, flags=re.MULTILINE) # remove leading " * "
    # Drop a bare " *" continuation marker (blank comment line).
    text = re.sub(r'^ \*', r'', text, flags=re.MULTILINE) # remove " *" alone
    # DOTALL lets <...> placeholders span lines; non-greedy keeps them minimal.
    text = re.sub(r'<(\S|\S.*?\S)>', r'`\1`', text, flags=re.MULTILINE | re.DOTALL) # convert <text> to `text`
    return text
def close(scope):
    """
    Close the existing connection with the remote host. Rarely used, since
    Exscript normally closes the connection automatically when the script
    has completed. Stores the connection's response in ``__response__`` and
    returns True.
    """
    connection = scope.get('__connection__')
    connection.close(1)
    scope.define(__response__=connection.response)
    return True
def query_long_descr(database, dict_name):
    """Return the long description for a given dictionary, or None if the
    dictionary is unknown.

    :param database: an open DB-API connection (must support ``cursor()``).
    :param dict_name: value matched against ``DICTIONARIES.DI_DICT``.
    """
    curs = database.cursor()
    sql = '''
        SELECT DI_LONG_DESCR
        FROM DICTIONARIES
        WHERE DI_DICT=?
        '''
    curs.execute(sql, (dict_name,))
    # fetchone avoids materialising the whole result set to read one row.
    row = curs.fetchone()
    return None if row is None else row[0]
def db2lin(data):
    """
    Convert from logarithmic (decibel) to linear units.

    Parameters
    ----------
    data: single value or an array

    Returns
    -------
    the data converted to linear scale
    """
    exponent = data / 10.0
    return 10 ** exponent
def is_not_none(value):
    """Helper returning whether ``value is not None``."""
    return not (value is None)
import torch
def mapk(output, target, k=3):
    """
    Computes the mean average precision at k.

    Parameters
    ----------
    output (torch.Tensor): A Tensor of predicted elements.
        Shape: (N,C) where C = number of classes, N = batch size
    target (torch.int): A Tensor of elements that are to be predicted.
        Shape: (N) where each value is 0≤targets[i]≤C−1
    k (int, optional): The maximum number of predicted elements

    Returns
    -------
    score (torch.float): The mean average precision at k over the output
    """
    with torch.no_grad():  # metric only — no gradients needed
        batch_size = target.size(0)
        # top-k class indices per sample: (N, k), then transposed to (k, N)
        _, pred = output.topk(k, 1, True, True)
        pred = pred.t()
        # correct[i][j] == 1.0 iff the rank-i prediction of sample j hits the target
        correct = pred.eq(target.view(1, -1).expand_as(pred)).float()
        for i in range(k):
            # weight a hit at rank i by 1/(i+1) — its precision contribution
            # (in-place mul_ mutates `correct` row by row)
            correct[i].mul_(1.0 / (i + 1))
        # sum weighted hits over all ranks and samples, then average per sample
        score = correct.view(-1).sum(0)
        score.mul_(1.0 / batch_size)
        return score
def Check_VCF_Paths(tp_vcf_path, fp_vcf_path):
    """Check that there are equal numbers of TP and FP vcfs and pair them.

    Parameters
    ----------
    tp_vcf_path : str
        comma separated list of TP vcf paths
    fp_vcf_path : str
        comma separated list of FP vcf paths

    Returns
    -------
    zip
        an iterator of (tp_path, fp_path) tuples

    Raises
    ------
    ValueError
        if the two lists contain different numbers of paths
    """
    tp_paths = tp_vcf_path.split(",")
    fp_paths = fp_vcf_path.split(",")
    # Compare counts unconditionally: with no commas both lists have length 1,
    # so the original "only check when a comma is present" guard was redundant.
    if len(tp_paths) != len(fp_paths):
        raise ValueError('Unequal number of True and False VCFs supplied')
    return zip(tp_paths, fp_paths)
def get_nearest_neighbours(p, N, i):
    """Return the N rows of *p* nearest to row *i*, excluding row *i* itself.

    Args:
        p (DataFrame): vertices dataframe with 'x', 'y' and 'z' columns
        N (int): number of nearest neighbours to return
        i (int): loc (label) of the reference row within p

    Returns:
        DataFrame: the N nearest rows, sorted by squared distance, with an
        added 'dist2' column holding that squared distance
    """
    neighbours = p.copy()
    # Offsets of every vertex from the reference vertex.
    offsets = neighbours[["x", "y", "z"]] - p[["x", "y", "z"]].loc[i]
    squared = (offsets.x ** 2 + offsets.y ** 2 + offsets.z ** 2).to_frame(name='dist2')
    neighbours = neighbours.join(squared)
    neighbours.sort_values(by='dist2', inplace=True)
    # The first row is the reference point itself (dist2 == 0); skip it.
    return neighbours.iloc[1:N + 1]
def mean(values):
    """
    Compute the arithmetic mean of a list of values.

    Included primarily to reduce dependency on external math libraries
    like numpy in the core algorithm.

    :param values: a list of numbers
    :type values: list
    :return: the mean of the list of values
    :rtype: float
    :raises ValueError: if *values* is empty

    >>> mean([600, 470, 170, 430, 300])
    394.0
    """
    if not values:
        raise ValueError("Length of list must be greater than 0.")
    return float(sum(values)) / len(values)
def sqlquote( value ):
    """Naive SQL quoting.

    All values except None and "" are returned as SQL strings in single
    quotes, with any embedded quotes doubled; None/"" become NULL.
    """
    if value is None or value == "":
        return 'NULL'
    escaped = str(value).replace( "'", "''" )
    return "'{}'".format(escaped)
from typing import Tuple
from typing import List
def parse_path(path_string: str) -> Tuple[str, List[str]]:
    """
    Inputs:
        path_string: a string containing drive_name, a colon and one or more
            folders, like: 'my_drive:folder1/folder2'
    Returns a tuple of the drive_name and the folder list.
    """
    # str.partition splits on the first ':' only, replacing the original
    # split/join round-trip; folder names may therefore contain ':'.
    drive, _, remainder = path_string.partition(":")
    return drive, remainder.split("/")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.