| content (string, 39 – 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 – 710k) |
|---|---|---|
def bool_to_yes_no(value):
"""
Turn a boolean into a yes/no string
:param value eg True:
:return string eg "yes":
"""
if value:
return "yes"
return "no"
|
4a2716519b19ff6e9ba025ad3a1d5b36600cbaec
| 390,631
|
def Finish_to_c(self):
"""Syntax conversion for breaking a loop."""
return f"goto break_{self.targetID};"
|
0ca45ac96bbeae75c0bbeead4383efa96d3ee00a
| 670,389
|
def _passes_cortex_depth(line, min_depth):
"""Do any genotypes in the cortex_var VCF line passes the minimum depth requirement?
"""
parts = line.split("\t")
cov_index = parts[8].split(":").index("COV")
passes_depth = False
for gt in parts[9:]:
cur_cov = gt.split(":")[cov_index]
cur_depth = sum(int(x) for x in cur_cov.split(","))
if cur_depth >= min_depth:
passes_depth = True
return passes_depth
|
59bf4336ce5ef71054b3869725aa56d9ead8b35d
| 436,285
|
def f_line_count_fd(stream, *args, **kwargs):
"""
A quick utility function to count the remaining number of
lines in an already opened stream. If the stream is seekable,
this function will save and revert to the original position
after counting
    :param stream: A stream object which may be iterated line by line.
    :param offset: A numeric offset to apply to the resulting line count
        (passed as a keyword argument).
    :return: The number of lines remaining in the given stream (int).
"""
prev = None
if stream.seekable():
prev = stream.tell()
lines = 0
if "offset" in kwargs:
lines += kwargs["offset"]
for _ in stream:
lines += 1
if prev is not None:
stream.seek(prev)
return lines
|
016f1c889e18664ff4b5d9758977f241f6e148c9
| 404,250
|
import platform
import struct
def _ioc_encode(direction, number, structure):
"""
ioctl command encoding helper function
Calculates the appropriate spidev ioctl op argument given the direction,
command number, and argument structure in python's struct.pack format.
Returns a tuple of the calculated op and the struct.pack format
See Linux kernel source file /include/uapi/asm/ioctl.h
"""
ioc_magic = ord("k")
ioc_nrbits = 8
ioc_typebits = 8
if platform.machine() == "mips":
ioc_sizebits = 13
else:
ioc_sizebits = 14
ioc_nrshift = 0
ioc_typeshift = ioc_nrshift + ioc_nrbits
ioc_sizeshift = ioc_typeshift + ioc_typebits
ioc_dirshift = ioc_sizeshift + ioc_sizebits
size = struct.calcsize(structure)
operation = (
(direction << ioc_dirshift)
| (ioc_magic << ioc_typeshift)
| (number << ioc_nrshift)
| (size << ioc_sizeshift)
)
    return operation, structure
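A minimal usage sketch for the helper above; the direction value follows the kernel's _IOC_WRITE == 1 convention, and the command number and struct format below are illustrative assumptions.
# Encode a hypothetical "write one byte" ioctl (command number 1, payload format "B").
op, fmt = _ioc_encode(1, 1, "B")
print(hex(op), fmt)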
|
b3f8ac548f767cdb86fbcd0e9819a2d693d24bc9
| 632,764
|
import requests
from typing import Optional
from datetime import datetime
def get_latest_block(alchemy_request_url: str, session: requests.Session) -> Optional[str]:
"""Get the latest block on the network.
    Computing the latest block once and using it throughout other methods ensures
    correctness if the network moves to another block while the script is running.
[Alchemy API docs](https://docs.alchemy.com/alchemy/apis/ethereum/eth-blocknumber)
Args:
alchemy_request_url: Request URL used to query Alchemy's API
session: Session object used for sharing state across requests
Returns:
        Latest block, or None if unsuccessful. If None is returned due to a
        network error, callers fall back to the "latest" block tag at the time of each API call.
"""
payload = {
"jsonrpc": "2.0",
"id": 0,
"method": "eth_blockNumber",
"params": [],
}
try:
response = session.post(alchemy_request_url, json=payload)
response.raise_for_status()
latest_block = response.json()['result']
print(f"Latest block as of time {str(datetime.now())} is {latest_block}")
return latest_block
except requests.exceptions.HTTPError as e:
print(e)
return None
|
7da8887c7d8254e34bd02970a2af3d1c77e956d3
| 154,741
|
def _is_linux_binary(fpath):
"""
    Reads the magic bytes and determines whether the given file is a Linux (ELF) binary.
"""
with open(fpath, 'rb') as f:
return f.read(4) == b'\x7fELF'
|
dc326401d731ea1acbbf9c29116b6fe8123c85f1
| 477,746
|
def mask_percentage(mask):
"""Returns the percentage of pixels in the mask that are True.
:param mask: HxW bool NumPy array (False for locations to be excluded)
:return: float
"""
return mask.sum() / mask.size
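A small usage sketch for the function above, assuming NumPy is available:
import numpy as np
# Three of the four pixels are True, so the returned fraction is 0.75.
print(mask_percentage(np.array([[True, True], [True, False]])))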
|
bad1c489dca2ef3a440bba1f09d48935295a8c68
| 285,886
|
def roman_to_integer(s: str) -> int:
    """
    Returns the corresponding integer for the input Roman numeral.
    This function assumes the input is in proper Roman numeral form.
    >>> roman_to_integer("III")
    3
    >>> roman_to_integer("IV")
    4
    >>> roman_to_integer("XL")
    40
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    for i, char in enumerate(s):
        # A symbol smaller than the one that follows it is subtractive (e.g. the I in IV).
        if i + 1 < len(s) and values[char] < values[s[i + 1]]:
            total -= values[char]
        else:
            total += values[char]
    return total
|
6cb22e56738a3f533a3ab00e549645bcbd3f52f6
| 201,490
|
def bubble(a):
"""
Bubble Sort: compare adjacent elements of the list left-to-right,
and swap them if they are out of order. After one pass through
the list swapping adjacent items, the largest item will be in
the rightmost position. The remainder is one element smaller;
    apply the same method to this list, and so on.
    Sorts the list in place and returns the number of swaps performed.
    """
count = 0
for i in range(len(a)-1):
for j in range(len(a)-i-1):
if a[j+1] < a[j]:
a[j],a[j+1] = a[j+1],a[j]
count += 1
return count
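A quick illustration of the function above: it sorts the list in place and returns the number of swaps performed.
data = [3, 1, 2]
swaps = bubble(data)
print(data, swaps)  # [1, 2, 3] 2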
|
f2c601f60ef29c0377dd63c9dfd5937026a60ec6
| 333,370
|
def link_info_gen(C: dict, mapping: dict, link_delay, link_loss):
"""Generate the specified link information
Args:
C (dict): Capacity dictionary (key, value) = (link ID, capacity of the link).
mapping (dict): Link dictionary (key, value) = (link ID, source and destination pair of the link).
link_delay (float): Specified delay for the link.
link_loss (float): Specified loss for the link.
Returns:
        str: Link information for all specified links, separated by ";".
Examples:
The return can be "link_info: s1, s17, 5, 5ms, 0, N/A, N/A; s2, s17, 5, 5ms, 0, N/A, N/A; ..."
"""
link_info = "link_info: "
for link_id, cap in C.items():
link_info += "{}, {}, {}, {}ms, {}, N/A, N/A; ".format(*mapping[link_id], cap, link_delay, link_loss)
return link_info[:-2]
|
36681f44eaf8537fa956d090c5a8181f5daef3cd
| 348,957
|
import difflib
def generate_diff(originalLicenseText, inputLicenseText):
"""Generate difference of the input license text with that of SPDX license.
Arguments:
originalLicenseText {string} -- SPDX license text of the closely matched license.
inputLicenseText {string} -- license text input by the user.
Returns:
list -- list of lines containing the difference between the two license texts.
"""
lines = []
for line in difflib.unified_diff(originalLicenseText.splitlines(), inputLicenseText.splitlines()):
lines.append(line)
return lines
|
af4cad3836f5c0302dc529b1a5fe161236e26a84
| 163,787
|
def get_sum(path, tree):
""" Returns the sum of all the numbers in the path for the given tree. """
pathsum = 0
for i, row in enumerate(tree):
pathsum += row[path[i]]
return pathsum
|
018cc24c6a1dc20cd9dbf5b0db6e3b20e257c7e6
| 660,551
|
def expand_span(span, expand_length=2):
"""
Args:
span (list): [st, ed]
expand_length (int): length to add on the two sides
Returns:
expanded_span (list): [max(0, st-expand_length), ed + expand_length]
    Only use the span for indexing, so there is no need to worry about the case
    where (ed + expand_length) >= max_length.
"""
return [max(0, span[0] - expand_length), span[1] + expand_length]
|
083d4d8dd70a4365b4ec06fc06e6c3d3c6055804
| 427,365
|
def density_net(G0):
"""
    Receives a directed network and computes its density.
"""
m0 = G0.size() # Number of links in the network G0
n0 = G0.number_of_nodes() # Number of nodes in the network G0
return m0 / (n0 * (n0 - 1.0))
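A minimal usage sketch; networkx is an assumption here (any graph object exposing size() and number_of_nodes() works):
import networkx as nx
# A 3-node directed cycle has 3 of the 6 possible directed edges, i.e. density 0.5.
G0 = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
print(density_net(G0))  # 0.5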
|
2cc56d31bc86def6b79ea29dc37b0d170ca2f43f
| 257,480
|
def bdev_raid_get_bdevs(client, category):
"""Get list of raid bdevs based on category
Args:
        category: one of 'all', 'online', 'configuring' or 'offline'
Returns:
List of raid bdev names
"""
params = {'category': category}
return client.call('bdev_raid_get_bdevs', params)
|
ab9961e2c6eb181b641449da9db1e38c3d7ed845
| 498,408
|
import re
def header_list (exprDF):
"""
Function to obtain a list of the miR and genes present in the dataframe
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Return:
lMir list miR List
lGene list Gene List
"""
lAll = exprDF.columns.tolist()
patMir = re.compile("^hsa-...-*")
lMir = [i for i in lAll if patMir.match(i)]
lGene = [i for i in lAll if not patMir.match(i)]
return lMir, lGene
|
372bd36db1bffafb02f4063a3fb4bbfc9780d1ba
| 228,226
|
from typing import Union
from pathlib import Path
def find_mo(search_paths=None) -> Union[Path, None]:
"""
Args:
        search_paths: paths where ModelOptimizer may be found. If None, only the default paths are used.
Returns:
path to the ModelOptimizer or None if it wasn't found.
"""
default_mo_path = ('intel', 'openvino', 'deployment_tools', 'model_optimizer')
default_paths = [Path.home().joinpath(*default_mo_path), Path('/opt').joinpath(*default_mo_path)]
executable = 'mo.py'
for path in search_paths or default_paths:
path = Path(path)
if not path.is_dir():
continue
mo = path / executable
if not mo.is_file():
continue
return mo
return None
|
4657e15649692415dd10f2daa6527cade351d8fc
| 6,241
|
import torch
def apply_plot_args_to_trajectories(args, obs_traj, gt, predictions):
"""
Apply command line arguments to the trajectories to be plotted
:param args: command line arguments containing the several changes/restrictions to the trajectories
:param obs_traj: Tensor of shape [obs_traj_len, num_peds, 2]. Past trajectories of all pedestrians
:param gt: Tensor of shape [pred_traj_len, num_peds, 2]. Real Future (ground truth) trajectories of all pedestrians
:param predictions: Tensor of shape [num_methods, pred_traj_len, num_peds, 2]. Predicted trajectories by
'num_methods' different models, for all pedestrians
:return: the three aforementioned tensors, with the arguments applied.
"""
squeeze_unsqueeze = obs_traj.ndim < 3 # for the case of just one pedestrian - shape (traj_len, 2)
if squeeze_unsqueeze:
obs_traj, gt, predictions = obs_traj.unsqueeze(1), gt.unsqueeze(1), predictions.unsqueeze(2)
num_peds = obs_traj.shape[1]
seq_mask = torch.ones(num_peds, device=obs_traj.device).to(torch.bool)
if args.ignore_neighbours_past:
seq_mask *= ~torch.all(torch.isnan(gt[:, :, 0]), dim=0)
obs_traj, gt, predictions = obs_traj[:, seq_mask], gt[:, seq_mask], predictions[:, :, seq_mask]
if args.switch_x_y:
obs_traj[:, :, [0, 1]] = obs_traj[:, :, [1, 0]]
gt[:, :, [0, 1]] = gt[:, :, [1, 0]]
predictions[:, :, :, [0, 1]] = predictions[:, :, :, [1, 0]]
if args.invert_x:
obs_traj[:, :, 0] = - obs_traj[:, :, 0]
gt[:, :, 0] = - gt[:, :, 0]
predictions[:, :, :, 0] = - predictions[:, :, :, 0]
if args.invert_y:
obs_traj[:, :, 1] = - obs_traj[:, :, 1]
gt[:, :, 1] = - gt[:, :, 1]
predictions[:, :, :, 1] = - predictions[:, :, :, 1]
if squeeze_unsqueeze:
return obs_traj.squeeze(1), gt.squeeze(1), predictions.squeeze(2)
return obs_traj, gt, predictions
|
fec18dc35489cbaf475ebafaba23b8f82ff23234
| 396,775
|
from typing import List
import pkgutil
def find_commands(command_dir: str) -> List[str]:
"""
    Get all command names in a folder
:return: List of commands names
"""
if not command_dir:
return []
return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])
if not is_pkg and not name.startswith('_')]
|
3f9ad515955293e7a91e5246e4c25126f4f83cf8
| 359,483
|
def _get_output_dir(name):
""" Return the name of the main genrule's output directory """
return name + "-outputs"
|
5862c44d39e6f7ffd86ad3fb1f95b8240e4409cc
| 323,273
|
import zlib
def _crc32_checksum(filepath):
"""Calculate the checksum of a file using CRC32."""
with open(filepath, "rb") as f:
checksum = zlib.crc32(f.read())
return checksum
|
21ab0fe07580c7176dec1f0d0ea280d9f72404ae
| 88,313
|
import torch
def depth_regression(p: torch.Tensor, depth_values: torch.Tensor) -> torch.Tensor:
"""Implements per-pixel depth regression based upon a probability distribution per-pixel.
The regressed depth value D(p) at pixel p is found as the expectation w.r.t. P of the hypotheses.
Args:
p: probability volume [B, D, H, W]
depth_values: discrete depth values [B, D]
Returns:
result depth: expected value, soft argmin [B, 1, H, W]
"""
depth_values = depth_values.view(*depth_values.shape, 1, 1)
depth = torch.sum(p * depth_values, dim=1)
depth = depth.unsqueeze(1)
return depth
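A tiny worked example for the soft argmin above, using two depth hypotheses for a single pixel:
p = torch.tensor([[[[0.25]], [[0.75]]]])   # probability volume, shape [1, 2, 1, 1]
depth_values = torch.tensor([[1.0, 3.0]])  # depth hypotheses, shape [1, 2]
print(depth_regression(p, depth_values))   # expectation 0.25*1 + 0.75*3 = 2.5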
|
8ba923fe20bd5d87ee4acb49956dd1c0e7f105df
| 271,916
|
def get_set_sizes(df):
"""
get sizes of training and test sets based on size of dataset.
    training set: 90% of the dataset; testing set: 10% of the dataset
:return: training_set_size, testing_set_size
"""
data_set_size = len(df.index)
training_set_size = data_set_size * 0.9
testing_set_size = data_set_size * 0.1
return int(training_set_size), int(testing_set_size)
|
e07495f651814e32743cecd42d23b9f7dd187a74
| 658,633
|
import base64
def base64_decode(value):
"""Dencode the value in base64"""
return base64.b64decode(value)
|
be47a54a9bcbc8782662372a4a1917db3d36535b
| 352,465
|
import torch
def arrange_neighbor(neighbor1, neighbor2, assignment):
"""This function re-arrange the neighbor, so the neighbors will be matched
Args:
neighbor1: 3D tensor, [bs, num_node1, neighbor_k, dim]
neighbor2: 3D tensor, [bs, num_node2, neighbor_k, dim]
assignment: [bs, num_node1, num_node2, neighbor_k, 2]
"""
index1, index2 = torch.chunk(assignment, 2, dim=-1) # [bs, num_node1, num_node2, neighbor_k, 1]
index1 = index1.repeat(1, 1, 1, 1, neighbor1.size(-1)) # [bs, num_node1, num_node2, neighbor_k, dim]
index2 = index2.repeat(1, 1, 1, 1, neighbor2.size(-1)) # [bs, num_node1, num_node2, neighbor_k, dim]
neighbor1 = torch.gather(neighbor1, index=index1.long(), dim=-2)
neighbor2 = torch.gather(neighbor2, index=index2.long(), dim=-2)
return neighbor1, neighbor2
|
3c71f8afeb5ded4b5cf47a9b1403e3856c8b8b39
| 489,350
|
def find_matching(text, pos, char1, char2):
"""Finds the matching closing character char2 corresponding to the opening
character char1, starting from position pos.
"""
depth = 1
for i in range(pos, len(text)):
if text[i] == char1:
depth += 1
elif text[i] == char2:
depth -= 1
if depth == 0:
return i
return -1
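A short example for the function above; scanning starts just after the opening character:
s = "foo(bar(baz))"
# The '(' at index 3 is matched by the ')' at index 12.
print(find_matching(s, 4, '(', ')'))  # 12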
|
94838305f6dd7e406fd1f0a4d59c108a0e5e9376
| 294,441
|
def find_list_index(a_list, item):
"""
Finds the index of an item in a list.
:param a_list: A list to find the index in.
:type a_list: list
:param item: The item to find the index for.
:type item: str
:return: The index of the item, or None if not in the list.
:rtype: int | None
"""
    try:
        return a_list.index(item)
    except ValueError:
        return None
|
c9eb862b4af3eb113cca9ee55edda05b9fbfa8fe
| 40,940
|
def _compute_regularization(alpha, l1_ratio, regularization):
"""Compute L1 and L2 regularization coefficients for W and H."""
alpha_H = 0.
alpha_W = 0.
if regularization in ('both', 'components'):
alpha_H = float(alpha)
if regularization in ('both', 'transformation'):
alpha_W = float(alpha)
l1_reg_W = alpha_W * l1_ratio
l1_reg_H = alpha_H * l1_ratio
l2_reg_W = alpha_W * (1. - l1_ratio)
l2_reg_H = alpha_H * (1. - l1_ratio)
return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
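A worked example of the split performed above: with regularization='components' only H is penalised, so the W coefficients stay zero.
# alpha=1.0, l1_ratio=0.5 -> (l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H) = (0.0, 0.5, 0.0, 0.5)
print(_compute_regularization(1.0, 0.5, 'components'))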
|
0c25c3f68817276b8e12f0f261202e411cc9e2bf
| 274,837
|
import requests
def object_store_get(api_key, object_type, object_id):
"""
Fetches an object of specific type from the Object Store
:param api_key: Import.io API Key
:param object_type: Type of object: crawlrun, extractor, etc
:param object_id: Unique identifier of an object
:return: response
"""
url = "https://store.import.io/store/{0}/{1}".format(object_type, object_id)
querystring = {
"_apikey": api_key
}
headers = {
'accept': "application/json",
'cache-control': "no-cache",
}
response = requests.request("GET", url, headers=headers, params=querystring)
return response
|
23b082939bb1578b7430b5d28798572372a37d5f
| 233,307
|
def _true(*args, **kwargs): # pylint: disable=unused-argument
"""Return ``True``."""
return True
|
0496944d2d1095a6b38209f036ecd6eb930b2de2
| 303,484
|
import re
def parse_extensions(extensions: str) -> re.Pattern:
"""
Create a regex parser to check for file extensions.
Note: Separate extensions by one of
[',', '`', '*', ' ']
"""
sep = [i for i in ',`* ' if i in extensions]
    pattern = '|'.join(rf'\.{i}$' for i in extensions.split(sep[0] if sep else None))
pat = re.compile(pattern, re.I)
return pat
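A brief usage sketch for the parser above:
pat = parse_extensions('py,txt')
# Matches are case-insensitive and anchored to the end of the file name.
print(bool(pat.search('main.PY')), bool(pat.search('notes.md')))  # True False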
|
67839ea427fbf94fb781301eaa8ec03e071d306c
| 500,943
|
def whitespace_lines(lines: list[str]) -> bool:
""" Return whether lines are all whitespaces """
return all(not line.strip() for line in lines)
|
e9cb003fd57e7973d376e4680b189f3c5d9c38b6
| 138,517
|
from datetime import datetime
def dict_to_dt(dt_dict):
"""Reverse of :func:`dt_dict_now`.
:param dict dt_dict: A :class:`dict` (such as :func:`dt_dict_now` returns)
    that corresponds to the keyword parameters of the
:class:`~datetime.datetime` constructor.
:return: A :class:`~datetime.datetime` object.
:rtype: datetime.datetime
"""
return datetime(**dt_dict)
|
23e083badc45a593842a959e94a58857699966b1
| 450,465
|
import torch
def MSE(output, gt):
"""Compute mean squared error."""
return torch.mean((output - gt)**2)
|
bcafc7baa8c318a3b94343cd966ba8db7e22239f
| 378,350
|
def bytes_to_label(label_bytes):
"""
    Takes the bytes from a TYPE_STATELABEL packet, removes the NUL char and
    everything after it, then converts the result to unicode.
:param label_bytes: The bytes from the TYPE_STATELABEL packet
:returns: unicode -- The label of the device
"""
    strlen = label_bytes.find(b'\x00')
return label_bytes[0:strlen].decode('utf-8')
|
8184cf59ed7f35d9f72137c14920f4e861fcd8da
| 236,358
|
def add_xor(op_a: bytearray, op_b: bytearray) -> bytearray:
"""
Byte-by-byte 'xor' operation for byte objects.
Args:
op_a: The first operand.
op_b: The second operand.
Returns:
Result of the byte-by-byte 'xor' operation.
"""
op_a = bytearray(op_a)
op_b = bytearray(op_b)
result_len = min(len(op_a), len(op_b))
result = bytearray(result_len)
for i in range(result_len):
result[i] = op_a[i] ^ op_b[i]
return result
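A small example of the byte-wise XOR above; the result is truncated to the shorter operand:
print(add_xor(b'\x0f\xf0', b'\xff\xff\x00').hex())  # 'f00f'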
|
d8caee607e7ec7001f1a52e26af78fdd36226069
| 320,261
|
def delete_file_nokia_sros(ssh_conn, dest_file_system, dest_file):
"""Delete a remote file for a Nokia SR OS device."""
full_file_name = "{}/{}".format(dest_file_system, dest_file)
cmd = "file delete {} force".format(full_file_name)
cmd_prefix = ""
if "@" in ssh_conn.base_prompt:
cmd_prefix = "//"
ssh_conn.send_command(cmd_prefix + "environment no more")
output = ssh_conn.send_command_timing(
cmd_prefix + cmd, strip_command=False, strip_prompt=False
)
return output
|
ef7a5152ed01640782b30a3509df5d8a9a7cfacf
| 660,914
|
import pytz
from datetime import datetime
def datetimeToEpochtime(dt):
"""Convert a datetime object to epoch time"""
if dt.tzinfo is None:
dt_utc = dt
else:
dt_utc = dt.astimezone(pytz.utc).replace(tzinfo=None)
epoch_utc = datetime.utcfromtimestamp(0)
return int((dt_utc - epoch_utc).total_seconds() * 1000)
|
c0baf06de714811593ecc8445408ff9498de6750
| 591,541
|
def get_dict_key_from_value(dictionary, val):
"""
Return the key of a dictionary that stores the val
Parameters
----------
    dictionary: dict
    val: anything that can be stored in a dictionary
    Returns
    -------
    The matching key, or the string "key doesn't exist" if no key maps to val.
    """
for key in dictionary:
value = dictionary[key]
if val == value:
return key
return "key doesn't exist"
|
3cfaf464dcd7e42f809b23d39e5164e6dcef5b47
| 301,013
|
import six
def di(row):
"""Returns a dict_items object for easier comparison"""
return six.viewitems(row)
|
2c2f12db46925e704c13cd52d55954d734c796a1
| 309,871
|
from collections import OrderedDict
def build_request_include(include, params):
"""Augment request parameters with includes.
When one or all resources are requested an additional set of
resources can be requested as part of the request. This function
extends the given parameters for a request with a list of resource
types passed in as a list of :class:`Resource` subclasses.
Args:
include([Resource class]): A list of resource classes to include
params(dict): The (optional) dictionary of request parameters to extend
Returns:
An updated or new dictionary of parameters extended with an
include query parameter.
"""
params = params or OrderedDict()
if include is not None:
params['include'] = ','.join([cls._resource_type() for cls in include])
return params
|
396731528670c3ee328f47ac0ab5da3d587f8897
| 385,539
|
def _CreateYumPkgRepo(messages, repo_id, display_name, repo_name):
"""Create a yum repo in guest policy.
Args:
messages: os config guest policy api messages.
repo_id: 'google-cloud-logging' or 'google-cloud-monitoring'.
display_name: 'Google Cloud Logging Agent Repository' or 'Google Cloud
Monitoring Agent Repository'.
repo_name: repository name.
Returns:
yum repos in guest policy.
"""
return messages.PackageRepository(
yum=messages.YumRepository(
id=repo_id,
displayName=display_name,
baseUrl='https://packages.cloud.google.com/yum/repos/%s' % repo_name,
gpgKeys=[
'https://packages.cloud.google.com/yum/doc/yum-key.gpg',
'https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg'
]))
|
9fb94e46e8fe3d976ef89c9d2e9117651c0a6e96
| 421,594
|
def checker(str):
"""
Quick utility function to help with our input Q&A
"""
valid_inputs = ['y', 'yes', 'n', 'no']
var = input(str).lower()
while not var in valid_inputs:
print("Valid inputs for this question are y, n, yes, and no.")
var = input(str).lower()
return var
|
8b53bcab94b9f29687ccb8707227e820742ae282
| 368,888
|
def generate_csv_url(sheet_url):
"""
Utility function for generating csv URL from a google sheets link
This function generates a link to a csv file from a link used to edit a google sheets file.
The gid must be present in the URL.
Parameters
----------
sheet_url : str
The URL for the google sheet file
Returns
-------
str
URL for the csv file
"""
    if isinstance(sheet_url, str):
        if sheet_url.find("edit#gid") > -1:
return sheet_url.replace("edit#gid", "export?format=csv&gid")
else:
raise ValueError("sheet_url must contain 'edit#gid' phrase")
else:
raise TypeError("sheet_url must be a string")
|
d941ef98f3400175b9db4f7ef5da858fc6426caf
| 13,291
|
def compound_fwd_query(query, rel):
"""
Create a compound forwards query that selects the
destination nodes, which have source nodes within
the subquery.
:param query: The subquery.
:param rel: The relation.
"""
smt = 'SELECT dst FROM %s WHERE src IN (%s)'
return smt % (rel, query), ()
|
b4db27ac83bcf9ac90f30cae1ea90f68e637d251
| 360,717
|
def response_error(error_code):
"""Send a response erorr."""
err = ('HTTP/1.1 ' + error_code + '\r\n\r\n').encode('utf8')
err += b'Sorry we could not fulfill your request.\r\n\r\n'
return err
|
8604d888194d033ea28b2593c2bb171ec76e0433
| 157,501
|
def int2letters(x, alphabet):
"""
Return the alphabet representation of a non-negative integer x.
For example, with alphabet=['a','b']
0 -> 'a'
1 -> 'b'
2 -> 'aa'
3 -> 'ab'
4 -> 'ba'
Modified from:
http://stackoverflow.com/questions/2267362/convert-integer-to-a-string-in-a-given-numeric-base-in-python
"""
base = len(alphabet)
if x < 0:
        raise ValueError('Only non-negative numbers are supported. Encountered %s' % x)
letters = []
quotient = x
while quotient >= 0:
quotient, remainder = divmod(quotient, base)
quotient -= 1
letters.append(alphabet[remainder])
letters.reverse()
return ''.join(letters)
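Two quick checks of the bijective numbering described in the docstring above:
print(int2letters(3, ['a', 'b']))                     # 'ab'
print(int2letters(27, 'abcdefghijklmnopqrstuvwxyz'))  # 'ab' (zero-based spreadsheet-style columns)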
|
6c536463b3bbbc52fa2362584806b75c47e14c23
| 134,892
|
def make_jinja2_filename(file_name: str) -> str:
""" Add .jinja2 to a filename.
:param file_name: the filename without an extension.
:return: the filename.
"""
return f'{file_name}.jinja2'
|
3d8a9fe002e75b51e0b3e333bd8b7a4766bdb23b
| 576,966
|
import csv
def CSVToMatrix(filename):
"""reads a csv file and returns a list of lists where each list item is a row in the csv"""
with open(filename) as csvfile:
reader = csv.reader(csvfile, delimiter=",")
csvmatrix = []
for row in reader:
csvmatrix.append(row)
return csvmatrix
|
c54db6aa0c53c6b28f33fd81b0f72ee70a56bd37
| 191,269
|
def get_feedstock_name_from_meta(meta):
"""Resolve the feedtstock name from the parsed meta.yaml."""
if "feedstock-name" in meta.meta["extra"]:
return meta.meta["extra"]["feedstock-name"]
elif "parent_recipe" in meta.meta["extra"]:
return meta.meta["extra"]["parent_recipe"]["name"]
else:
return meta.name()
|
1706bf74005730139f6783e4bb6c68c725be4f0b
| 71,765
|
def rect2lines(bbox):
"""
Given a bounding box, convert it into a path of points
Parameters
----------
bbox : list or numpy array [x1 y1 x2 y2]
Bounding box defined as top left and bottom right points
of the box
Returns
-------
    point_list : list
        Flat list of x, y coordinates of the five points defining a path
        to draw the bounding box as a rectangle
"""
point_list = [
bbox[0], bbox[1],
bbox[2], bbox[1],
bbox[2], bbox[3],
bbox[0], bbox[3],
bbox[0], bbox[1]
]
return point_list
|
cd653172f7ca3646c91ab20b90e2d42b0eb6f269
| 366,816
|
def encode_int(i, nbytes, encoding='little'):
""" encode integer i into nbytes bytes using a given byte ordering """
return i.to_bytes(nbytes, encoding)
|
82b8268af56fce1c4118b9b7f632d8b36d209d89
| 187,884
|
def get_mysql_client_cmd(db_username, db_password, db_host, db_port, db_schema):
"""
    Returns a mysql client command line according to the given variables
"""
if db_password:
cmd = "mysql -h %s -u%s -p%s -P %s %s" % (db_host, db_username, db_password, db_port, db_schema)
else:
cmd = "mysql -h %s -u%s -p -P %s %s" % (db_host, db_username, db_port, db_schema)
return cmd
|
e13c30bd93a973c606d58f45a199fbc1bdae69b9
| 405,886
|
import math
def phred33_to_rate(q):
"""Convert a phred33 character to an error rate"""
return math.pow(10,float(ord(q)-33)/-10)
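A worked example for the conversion above:
# 'I' encodes Phred quality 40 (ord('I') - 33), i.e. an error rate of 10**-4.
print(phred33_to_rate('I'))  # 0.0001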
|
0efb0d22fc9f189c87611bc6ea931b262a3cf49b
| 163,907
|
from typing import Tuple
from typing import Dict
def make_schema(endpoints: Tuple[str]) -> Dict:
"""Generate a Swagger 2.0 schema with the given endpoints.
Example:
If `endpoints` is ("success", "failure")
then the app will contain GET /success and GET /failure
"""
template = {
"swagger": "2.0",
"info": {"title": "Example API", "description": "An API to test Schemathesis", "version": "1.0.0"},
"host": "127.0.0.1:8888",
"basePath": "/api",
"schemes": ["http"],
"paths": {},
}
for endpoint in endpoints:
template["paths"][f"/{endpoint}"] = {
"get": {"summary": "Endpoint", "produces": ["application/json"], "responses": {200: {"description": "OK"}}}
}
return template
|
df8d3116f8124ecd14a0e3b34eb2e335e48d393e
| 239,373
|
def sumTo(aBound):
""" Return the sum of 1+2+3 ... n """
theSum = 0
aNumber = 1
while aNumber <= aBound:
theSum = theSum + aNumber
aNumber = aNumber + 1
return theSum
|
14130fdb9b062b399c04755c1a6030557c1bce52
| 447,799
|
def dict_mapper(obj):
"""
Converts an object to a dictionary if possible
"""
return obj.to_dict() if hasattr(obj, "to_dict") else obj
|
ab4361bfc9157ba9eebbd8fba75c71adf8904b80
| 151,611
|
import json
def parse_broadcast_message(to_parse):
"""Parse the given string as a json and return a dictionary with the json's contents"""
try:
decoded = json.loads(to_parse)
if decoded is not None:
return decoded
except ValueError:
# this is a JSONDecodeError, but we are catching superclass for backwards compatibility
return None
|
cf2db5a7b68b4bcebfbe592938084b13cbe731bf
| 597,949
|
def evaluate_precision(tp: int, fp: int) -> float:
"""Precision, aka Positive Predictive Value (PPV).
$PPV=\dfrac{TP}{TP + FP}$
Args:
tp: True Positives
fp: False Positives
"""
try:
return tp / (tp + fp)
except ZeroDivisionError:
return 0.0
|
5c591113833fffbb95cf1f638c76fa1c4e28044f
| 578,904
|
def const(a):
"""
The constant function. A function that returns a constant value.
a -> b -> a
"""
return lambda b: a
|
1dc9fe5b12ac20fbb68cffb743b4cd4dce77c017
| 52,283
|
def _extract_group(encr_text, fst_group_pos, snd_group_pos, min_group_len):
"""
    Extract the largest group of characters that may match at each position
ARGUMENT NOTES:
min_group_len -- The min length of the group
RETURN NOTES:
If the group has no minimum size, None. Otherwise, the following tuple:
(fst_group_pos, snd_group_pos, group_str)
USAGE:
>>> _extract_group('CSASTPKVSIQUTGQUCSASTPIUAQJB', 0, 16, 3)
(0, 16, 'CSASTP')
"""
old_fst_group_pos, old_snd_group_pos = fst_group_pos, snd_group_pos
group = ""
while encr_text[fst_group_pos] == encr_text[snd_group_pos]:
group += encr_text[fst_group_pos]
fst_group_pos += 1
snd_group_pos += 1
if fst_group_pos >= len(encr_text) or snd_group_pos >= len(encr_text):
break
if not group or len(group) < min_group_len:
return None
else:
return (old_fst_group_pos, old_snd_group_pos, group)
|
535c3974dc8c85b4ec6554cd8902ddf347e86bf2
| 248,754
|
import copy
import time
def process_like(proc):
"""Make an exact clone of a process, including state and all subprocesses.
The creation date is updated.
:param proc: process
:type proc: :class:`~climlab.process.process.Process`
:return: new process identical to the given process
:rtype: :class:`~climlab.process.process.Process`
:Example:
::
>>> import climlab
>>> from climlab.process.process import process_like
>>> model = climlab.EBM()
>>> model.subprocess.keys()
['diffusion', 'LW', 'albedo', 'insolation']
>>> albedo = model.subprocess['albedo']
>>> albedo_copy = process_like(albedo)
>>> albedo.creation_date
'Thu, 24 Mar 2016 01:32:25 +0000'
>>> albedo_copy.creation_date
'Thu, 24 Mar 2016 01:33:29 +0000'
"""
newproc = copy.deepcopy(proc)
newproc.creation_date = time.strftime("%a, %d %b %Y %H:%M:%S %z",
time.localtime())
return newproc
|
a28d3f2ed6634bcce623239df9376cef6d3f543e
| 561,205
|
import re
def validate_container_name(name):
"""Make sure a container name accordings to the naming convention
https://docs.openstack.org/developer/swift/api/object_api_v1_overview.html
https://lists.launchpad.net/openstack/msg06956.html
> Length of container names / Maximum value 256 bytes / Cannot contain the / character.
"""
validate_name = re.compile('^[^/]+$')
return (
len(name) <= 256 and bool(validate_name.match(name))
)
|
5bef8b304c004dc3169b6984b49a0d669fc9b7b3
| 12,490
|
from typing import Iterable
def _table_cell(items: Iterable[str]) -> str:
"""Make a row of table cell."""
return '|' + '|'.join(f" {t} " for t in items) + '|'
|
4b489283126900cdd371eb99c4ae3bd114232966
| 668,832
|
def get_waters(lines):
"""Helper function to extract waters from a PDB file"""
return "".join([line for line in lines if line[17:20] == "HOH"])
|
468fa05f0a1213669eb1a1ac2d29b0191ba96887
| 61,910
|
def TSKVsPartGetStartSector(tsk_vs_part):
"""Retrieves the start sector of a TSK volume system part object.
Args:
tsk_vs_part: a TSK volume system part object (instance of
pytsk3.TSK_VS_PART_INFO).
Returns:
The start sector or None.
"""
    # Note that because pytsk3.TSK_VS_PART_INFO does not explicitly define
    # start we need to check if the attribute exists.
return getattr(tsk_vs_part, u'start', None)
|
b7899143426344d4f09ea5369b19f9d052760054
| 415,168
|
def add (x, y):
"""returns the sum of two vectors"""
    return x[0] + y[0], x[1] + y[1]
|
dd2d0dca541a01852ae60f5ea21e7be3becaff8b
| 659,416
|
def clean_timestamp(timestamp, format=False):
"""
used to remove unwanted characters from the overly long timestamp in the json data of a tweet
eg: timestamp comes in as 'Thu Dec 17 13:44:24 +0000 2020' and for now we only want 'Thu Dec 17 13:44'
"""
cleaned_timestamp_list = str(timestamp).split(' ')[0:4]
# remove the seconds from the actual time part of the string
cleaned_timestamp_list[3] = cleaned_timestamp_list[3][0:5]
# join them back into a string
cleaned_timestamp = ' '.join(cleaned_timestamp_list)
return cleaned_timestamp
|
5fc1846f411ea8379dccf45f0770b946c718002b
| 680,318
|
def correct_barcode(barcode, mismatch_map):
"""
    Correct an observed raw barcode against a list of mismatch-to-whitelist mappings.
    Args:
        barcode (string): barcode sequence to be corrected
        mismatch_map (list of dict): list of dicts mapping mismatched sequences to real sequences
Returns:
string: corrected barcodes or None if barcode not correctable.
"""
for mismatch_whitelist in mismatch_map:
corrected = mismatch_whitelist.get(barcode, None)
if corrected:
return corrected
return None
|
a130d7ee285acc198eeece53641e13dd3ebebcf0
| 433,792
|
import json
def get_car_parking_drift(car):
"""
Gets properties that can change during a parking period but aren't
considered to interrupt the parking.
These are things like a car charging while being parked.
:param car: must be formatted in normalized electric2go dict format
:return: a hashable object
"""
# Use json.dumps() because a dict is not hashable.
# Sort keys to ensure deterministic key order in dumped JSON.
# Note: using sort_keys prevents us from using e.g. ujson
offer_drive_price = json.dumps(car['price_offer_details'], sort_keys=True)
return (car['api_estimated_range'], car['fuel'],
car['charging'], car['price_offer'], offer_drive_price)
|
9204321748d81ee2801a76ce0fe53de9cb81cda8
| 233,886
|
def check_utilization(nn, np, ppn, threshold=0.9, name=None):
"""Check whether the calculated node utilization is below threshold.
    This function raises a :class:`RuntimeError` if the calculated
    node utilization is below the given threshold. If the number of
    requested nodes is zero, 0 is returned and no utilization check is performed.
:param nn:
Number of requested nodes.
:param np:
Number of required processing units (e.g. CPUs, GPUs).
:param ppn:
Number of processing units available per node.
:param threshold:
The minimally required node utilization.
:param name:
A human-friendly name for the tested processing unit
to be used in the error message, for example: CPU or GPU.
:returns:
The number of calculated nodes.
:raises RuntimeError:
Raised if the node utilization is below the given threshold.
"""
if not (0 <= threshold <= 1.0):
raise ValueError("The value for 'threshold' must be between 0 and 1.")
# Zero nodes are just returned and possible utilization or validation checks
# must be performed elsewhere.
if nn == 0:
return 0
# The utilization is the number of processing units (np) required divided
# by the product of the number of nodes (nn) and the number of processing
# units per node (ppn).
utilization = np / (nn * ppn)
# Raise RuntimeError if the utilization is below the specified threshold.
if utilization < threshold:
raise RuntimeError(
"Low{name} utilization warning: {util:.0%}\n"
"Total resources requested would require {nn} node(s), "
"but each node supports up to {ppn}{name} task(s).\n"
"Requesting {np} total{name} task(s) would result in node underutilization. "
"Use --force to ignore the warning, but users are encouraged to use --pretend to "
"confirm that the submission script fully utilizes the compute resources before "
"force submission" .format(
util=utilization, np=np, nn=nn, ppn=ppn,
name=' {}'.format(name) if name else ''))
# Everything fine, return number of nodes (nn).
return nn
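A usage sketch for the check above; the node and CPU counts are illustrative assumptions:
# 300 tasks on 3 nodes with 128 CPUs each is ~78% utilization -> below the 0.9 default, so this raises.
try:
    check_utilization(3, 300, 128, name='CPU')
except RuntimeError as err:
    print(err)
# 300 tasks on 3 nodes with 104 CPUs each is ~96% utilization -> the node count is returned.
print(check_utilization(3, 300, 104, name='CPU'))  # 3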
|
a9d6f22ecca25d663bf8abd179f2f473debf5eea
| 284,716
|
def freq_(matrix):
"""
    Function that returns the frequency of each element in the matrix
    Parameters:
        matrix
    Returns:
        result(dictionary): maps each element to the number of times it occurs in the matrix
"""
result = dict()
for array in matrix:
for el in array:
result[el] = result.get(el,0) + 1
return result
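A quick example for the counter above:
print(freq_([[1, 2], [2, 3]]))  # {1: 1, 2: 2, 3: 1}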
|
cc7fc49404340bb41abb6a1fea9b8815227ad3d5
| 372,318
|
from typing import Iterable
def field_lookup(obj, field_path):
"""
Lookup django model field in similar way of django query lookup.
Args:
obj (instance): Django Model instance
field_path (str): '__' separated field path
Example:
        >>> from django.db import models
>>> from django.contrib.auth.models import User
>>> class Article(models.Model):
>>> title = models.CharField('title', max_length=200)
>>> author = models.ForeignKey(User, null=True,
>>> related_name='permission_test_articles_author')
>>> editors = models.ManyToManyField(User,
>>> related_name='permission_test_articles_editors')
>>> user = User.objects.create_user('test_user', 'password')
>>> article = Article.objects.create(title='test_article',
... author=user)
>>> article.editors.add(user)
>>> assert 'test_article' == field_lookup(article, 'title')
        >>> assert 'test_user' == field_lookup(article, 'author__username')
>>> assert ['test_user'] == list(field_lookup(article,
... 'editors__username'))
"""
if hasattr(obj, 'iterator'):
return (field_lookup(x, field_path) for x in obj.iterator())
elif isinstance(obj, Iterable):
return (field_lookup(x, field_path) for x in iter(obj))
# split the path
field_path = field_path.split('__', 1)
if len(field_path) == 1:
return getattr(obj, field_path[0], None)
return field_lookup(field_lookup(obj, field_path[0]), field_path[1])
|
38bdf5efa75fc9f8273d1a40f719a700b86aa026
| 38,880
|
def not_0(upper, lower):
"""Fills both values to whichever is not equal to 0, or leaves in place."""
    # Note that I compare the values to zero instead of using if not lower
# This is because the values coming in can be anything, including an
# empty list, which should be considered nonzero
if upper == 0:
return lower, lower
elif lower == 0:
return upper, upper
else:
return upper, lower
|
d436f764d79f1febe5dbfbf2407d5921ea06e2ab
| 32,228
|
import pathlib
def is_absolute(path: str) -> bool:
"""Checks if path is absolute
e.g
j.sals.fs.is_absolute('/home/rafy/') -> True
j.sals.fs.is_absolute('~/rafy/') -> False
Args:
path (str): path to check if it is absolute
Returns:
bool: True if absolute
"""
return pathlib.Path(path).is_absolute()
|
98dbb83d9db354c25f90b576de7209c480ef1e8a
| 145,492
|
def get_build_status_from_gcb(cloudbuild_api, cloud_project, build_id):
"""Returns the status of the build: |build_id| from cloudbuild_api."""
build_result = cloudbuild_api.get(projectId=cloud_project,
id=build_id).execute()
return build_result['status']
|
78520f1377a6fc77c3fe94b99243dd6756a07685
| 505,393
|
import re
def rm_quotation_marks(sent):
""" Remove single quotes used as quotation marks (e.g. some 'phrase in quotes')
Remove double quotes used as quotation marks (e.g. some "phrase in quotes" or
``phrase in quotes'')
"""
sent = re.sub(r"\s'([\w\s]+[\w])'\s", r' \1 ', sent)
return re.sub(r'["“”‘’]', r' ', sent)
|
0977773e9b9b72af9566ca3c31d3c30f9800e165
| 621,600
|
def _set_current_port(app_definition, service_port):
"""Set the service port on the provided app definition.
This works for both Dockerised and non-Dockerised applications.
"""
try:
port_mappings = app_definition['container']['docker']['portMappings']
port_mappings[0]['servicePort'] = service_port
except (KeyError, IndexError):
app_definition['ports'][0] = service_port
return app_definition
|
891ec96512078cc41811a497ae4a8876728ff8e2
| 508,357
|
def makesafe(s):
"""Makes the given string "safe" by replacing spaces and lower-casing (less aggressive)"""
def rep(c):
if c.isalnum():
return c.lower()
else:
return '_'
ret = ''.join([rep(c) for c in s])
return ret
|
13d8b683c348c991007ee2463e976ac1da394d29
| 646,611
|
def _get_scripts_resource(pe):
"""Return the PYTHONSCRIPT resource entry."""
res = None
for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries:
if entry.name and entry.name.string == b"PYTHONSCRIPT":
res = entry.directory.entries[0].directory.entries[0]
break
return res
|
3bef22589a2793b09d89c4f4552f12f1583c9274
| 43,952
|
def get_attribute_terms(product):
"""
Function to iterate through all variants of a variable product and compile a list of attribute terms from them.
:param product: Variable product and variants information
:return: list of term names
"""
attribute_terms = list()
for variation in product['variants']:
if variation['option_1_value'] not in attribute_terms:
attribute_terms.append(variation['option_1_value'])
return attribute_terms
|
3a785220eadffe20241116036e08c00b41e232fc
| 658,235
|
def improve_data(data_list: list, non_integers: list) -> list:
"""Takes a list of dictionaries containting the data, make sure all
dictionaries have the correct data, 0's all missing entries
Args:
data_list (list): list of dictionaries containing the data
non_integers (list): list of headers which should not be converted to
an integer
Returns:
list: improved list of data
"""
headers = list(data_list[0].keys())
for data in data_list:
for header in headers:
if data[header] == '':
data[header] = '0'
for data in data_list:
for header in headers:
if header not in non_integers:
data[header] = str(data[header])
data[header] = data[header].replace('*', '')
data[header] = float(data[header])
return data_list
|
c9345ebfb172a76d63c88f231fba058538a9ec77
| 99,446
|
def srun(arg_dict,qos=True,time=10,mem=4):
"""Return srun call, with certain parameters appended.
Arguments:
----------
arg_dict : dict
Dictionary of arguments passed into this script, which is used to append parameters to srun call.
qos : bool, optional
Quality of service, set to True for interactive jobs, to increase likelihood of scheduling.
mem : int, optional
The memory in GB (per node) to use for this call.
time : str, optional
Time limit to use for this call, in the form d-hh:mm:ss.
Returns:
--------
call : str
srun call with arguments appended."""
call = 'srun --time={0} --mem={1}GB --partition={2} --account={3}'.format(time,mem,arg_dict['partition'],arg_dict['account'])
if qos:
call += ' --qos qos-interactive'
if arg_dict['exclude'] != '':
call += ' --exclude={0}'.format(arg_dict['exclude'])
if arg_dict['reservation'] != '':
call += ' --reservation={0}'.format(arg_dict['reservation'])
return call
|
7769b4023a2caa9a3b0bfe47ea12af48ecf930f6
| 648,451
|
def _prep_sge_resource(resource):
"""Prepare SGE resource specifications from the command line handling special cases.
"""
resource = resource.strip()
k, v = resource.split("=")
if k in set(["ar"]):
return "#$ -%s %s" % (k, v)
else:
return "#$ -l %s" % resource
|
100c1152f5766a1bc68d289645ef56d152f98c14
| 62,579
|
def remove_duplicates(list_with_duplicates):
"""
Removes the duplicates and keeps the ordering of the original list.
For duplicates, the first occurrence is kept and the later occurrences are ignored.
Args:
list_with_duplicates: list that possibly contains duplicates
Returns:
A list with no duplicates.
"""
unique_set = set()
unique_list = []
for element in list_with_duplicates:
if element not in unique_set:
unique_set.add(element)
unique_list.append(element)
return unique_list
|
e72e26e69f9476669906ba2aa7864cec9eaf64d4
| 685,484
|
def get_rid_dict(contrib_element):
""" For an individual contributor, get the list of their associated rids.
More about rids: https://jats.nlm.nih.gov/archiving/tag-library/1.1/attribute/rid.html
Used in get_contrib_info().
:param contrib_element: An article XML element with the tag <contrib>
:return: dictionary matching each type of rid to its value for that contributor
"""
rid_dict = {}
contrib_elements = contrib_element.getchildren()
# get list of ref-types
rid_type_list = [el.attrib.get('ref-type', 'fn') for el in contrib_elements if el.tag == 'xref']
# make dict of ref-types to the actual ref numbers (rids)
for rid_type in set(rid_type_list):
rid_list = [el.attrib.get('rid', None) for el in contrib_elements if el.tag == 'xref' and el.attrib.get('ref-type', 'fn') == rid_type]
rid_dict[rid_type] = rid_list
return rid_dict
|
6e106aeb0acb1bc93569df3b5e2496882e8b8ba7
| 583,442
|
import random
def _apply_randomness(value, random_factor):
"""
Applies a random factor to the value
:param value: Input value
:param random_factor: Random factor, must be between 0 (no random) and 1 (output is between 0 and 2* value)
:return: Value with random factor applied
"""
if random_factor < 0 or random_factor > 1:
raise ValueError("Random factor must be in range 0 to 1")
return value + (random.uniform(random_factor * -1, random_factor) * value) if random_factor != 0 else value
|
f9efa3839e5ac513bf3b5f8dc4dc021d1ad0d03f
| 657,207
|
def convert_tokens_to_ids(word_to_id, tokens, max_sent_length=None, unknown_token=None):
"""
Converts tokens to integers with a predefined given mapping from word_to_id dictionary
    :param Dict[str, int] word_to_id:
    :param List[str] tokens:
    :param Optional[int] max_sent_length: If given, tokens is expected to be a list of lists;
        it is flattened, converted, and the result re-chunked into sublists of this length.
    :param Optional[str] unknown_token: The token to use for tokens that are not in the index. If
        None is given then '<OOV>' is used.
    :rtype: List[int]
"""
if unknown_token is None:
unknown_token = '<OOV>'
if max_sent_length is not None:
tokens = [item for sublist in tokens for item in sublist]
text_to_id = [
word_to_id[token] if token in word_to_id else word_to_id[unknown_token]
for token in tokens
]
if max_sent_length is not None:
text_to_id = [text_to_id[x:x + max_sent_length] for x in range(0, len(text_to_id), max_sent_length)]
return text_to_id
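A short usage sketch for the function above; the tiny vocabulary is an illustrative assumption:
vocab = {'<OOV>': 0, 'hello': 1, 'world': 2}
# 'there' is out of vocabulary and falls back to the '<OOV>' id.
print(convert_tokens_to_ids(vocab, ['hello', 'there', 'world']))  # [1, 0, 2]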
|
f28549ee24b961f775c527a7683460c2c19ca9ac
| 229,507
|
def filter_cols(df, columns=None, indices=None):
"""
Return the specified table filtered by the specified indices and
limited to the columns of interest.
"""
    if columns is None:
        if indices is None:
            return df
        return df.loc[indices]
    if indices is None:
        indices = slice(0, None)
    return df.loc[indices, columns]
|
7111ae972b7cc44d9a6cdcbbd6f3de2187fdfec8
| 308,805
|
def remove_entitled_users_duplicates(poll_model_collection, poll_model_name):
"""
Takes all polls of the given model and removes any duplicate entries from
entitled_users_at_stop
"""
def _remove_entitled_users_duplicates(apps, schema_editor):
PollModel = apps.get_model(poll_model_collection, poll_model_name)
for poll in PollModel.objects.all():
if poll.entitled_users_at_stop:
new_entitled_users = []
entitled_users_ids = set()
for entry in poll.entitled_users_at_stop:
if entry["user_id"] not in entitled_users_ids:
entitled_users_ids.add(entry["user_id"])
new_entitled_users.append(entry)
poll.entitled_users_at_stop = new_entitled_users
poll.save(skip_autoupdate=True)
return _remove_entitled_users_duplicates
|
e3efe5edc9c60f0cb7a6648781fdde0713b4c1c0
| 476,898
|
def readATGC_buffer(buf, s, e):
"""
    read the numbers of A/T/G/C between start and end in the buffer
:param buf: fasta buffer with \n removed
:param s: start position (inclusive), one based index
:param e: end position (inclusive), one based index
:return: numbers of A, T, G, C
"""
start = int(s)-1
end = int(e)
buffer = buf[start:end]
total_a = buffer.count('A') + buffer.count('a')
total_t = buffer.count('T') + buffer.count('t')
total_c = buffer.count('C') + buffer.count('c')
total_g = buffer.count('G') + buffer.count('g')
if total_a + total_t + total_g + total_c == 0:
print("goes here {0} {1}".format(s, e))
return (total_a, total_t, total_g, total_c)
|
c00eecf95761095ff99bd2b5c57c19349caf5769
| 132,055
|
import json
def deserialize_line(line, encoding="utf-8"):
"""
Transform a line in a file that was created as a result from a Luigi task into its metadata and main data.
:param line: Line to be serialized.
:type line: str
:param encoding: Encoding of line (default is utf-8).
:type encoding: str
"""
    # Note: json.loads() no longer accepts an "encoding" argument (removed in Python 3.9);
    # the line is already a decoded str, so it is loaded directly.
    return json.loads(line)
|
a848c2dc505856e1706ae3e67d5657782e4187dd
| 336,293
|
def _get_intervention_string(conf):
"""
Consolidates all the parameters to one single string.
Args:
conf (dict): yaml configuration of the experiment
Returns:
(str): a string to identify type of intervention being run
Raises:
(ValueError): if RISK_MODEL is unknown
"""
if conf['RISK_MODEL'] == "":
type_of_run = "UNMITIGATED"
if conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']:
type_of_run = "LOCKDOWN"
if conf['N_BEHAVIOR_LEVELS'] > 2:
type_of_run = "POST-LOCKDOWN NO TRACING"
return type_of_run
risk_model = conf['RISK_MODEL']
n_behavior_levels = conf['N_BEHAVIOR_LEVELS']
hhld_behavior = conf['MAKE_HOUSEHOLD_BEHAVE_SAME_AS_MAX_RISK_RESIDENT']
type_of_run = f"{risk_model} | HHLD_BEHAVIOR_SAME_AS_MAX_RISK_RESIDENT: {hhld_behavior} | N_BEHAVIOR_LEVELS:{n_behavior_levels} |"
if risk_model == "digital":
type_of_run += f" N_LEVELS_USED: 2 (1st and last) |"
type_of_run += f" TRACING_ORDER:{conf['TRACING_ORDER']} |"
type_of_run += f" TRACE_SYMPTOMS: {conf['TRACE_SYMPTOMS']} |"
type_of_run += f" INTERPOLATE_USING_LOCKDOWN_CONTACTS:{conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']} |"
type_of_run += f" MODIFY_BEHAVIOR: {conf['SHOULD_MODIFY_BEHAVIOR']}"
return type_of_run
if risk_model == "transformer":
type_of_run += f" USE_ORACLE: {conf['USE_ORACLE']}"
type_of_run += f" N_LEVELS_USED: {n_behavior_levels} |"
type_of_run += f" INTERPOLATE_USING_LOCKDOWN_CONTACTS:{conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']} |"
type_of_run += f" REC_LEVEL_THRESHOLDS: {conf['REC_LEVEL_THRESHOLDS']} |"
type_of_run += f" MAX_RISK_LEVEL: {conf['MAX_RISK_LEVEL']} |"
type_of_run += f" MODIFY_BEHAVIOR: {conf['SHOULD_MODIFY_BEHAVIOR']} "
type_of_run += f"\n RISK_MAPPING: {conf['RISK_MAPPING']}"
return type_of_run
if risk_model in ['heuristicv1', 'heuristicv2', 'heuristicv3', 'heuristicv4']:
type_of_run += f" N_LEVELS_USED: {n_behavior_levels} |"
type_of_run += f" INTERPOLATE_USING_LOCKDOWN_CONTACTS:{conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']} |"
type_of_run += f" MAX_RISK_LEVEL: {conf['MAX_RISK_LEVEL']} |"
type_of_run += f" MODIFY_BEHAVIOR: {conf['SHOULD_MODIFY_BEHAVIOR']}"
return type_of_run
raise ValueError(f"Unknown risk model:{risk_model}")
|
ba8972b0fe2005829db64e7c83dba322584784b3
| 233,016
|
def rolling_mean(ts, window):
"""Calculate rolling mean of time series.
Uses pandas.DataFrame.rolling() to calculate rolling mean
of a given window size.
If more than one column of data in ts, returns rolling mean
using given window size for each column of data.
Returns nans for times before first window.
:param ts: Time series data as a pandas DataFrame.
:param window: Window size over which to calculate mean (int).
    :return ts_mean: DataFrame with same columns as ts but with rolling
mean in place of data column.
"""
ts_mean = ts.rolling(window).mean()
ts_mean["time"] = ts["time"] # don't want mean of time!
return ts_mean
|
8cd2933b1a9c285666a62a5edacae8477dec1d3d
| 29,173
|
def get_output_type(operation):
"""
Find the name of the output type of the specified operation. This method first explores
the responseMessages defined for the operation. If a successful responseMessage (i.e.
response code in 200's range), has been declared with a responseModel, that will be
considered output type of the operation. Otherwise, it will fallback to the 'type'
attribute defined in the operation. If neither is defined, returns None.
Args:
operation - A Swagger operation description (dictionary)
Returns:
A string representing a data type name or None
"""
if operation.get('responseMessages'):
for rm in operation['responseMessages']:
if 200 <= rm['code'] < 210 and rm.get('responseModel'):
return rm['responseModel']
if operation.get('type'):
return operation['type']
return None
|
755f7717d36867b4e16cce75bb0d5e2db5343c7b
| 611,707
|
def scale_mesh(mesh, scale):
""" This function scales the vertices to range from 0 to scale
Parameters
----------
mesh : trimesh.base.Trimesh
A Trimesh mesh object to scale
scale : int
Specifies the max for the new range
Returns
-------
scaled_mesh : trimesh.base.Trimesh
Trimesh mesh object whose vertices ranges from 0 to scale
"""
vertices = mesh.vertices
maxval = vertices.max(axis=0)
minval = vertices.min(axis=0)
max_nodes = scale/(maxval-minval)
verts_scaled = max_nodes*(vertices - minval)
scaled_mesh = mesh.copy()
scaled_mesh.vertices = verts_scaled
return scaled_mesh
|
cdec5e3ed599d9cf4412376bfef4a371a2e64f62
| 522,482
|
def symmetrize(edges):
"""Symmetrizes the adjacency."""
inv_edges = {(d, s) for s, d in edges}
return edges.union(inv_edges)
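A small example for the function above:
print(sorted(symmetrize({(1, 2), (2, 3)})))  # [(1, 2), (2, 1), (2, 3), (3, 2)]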
|
d22652dcd29b562f916f155f5d8709fa27e09a7b
| 503,524
|
def construct_dict(genome, *dataset_names):
"""
Construct an allele counts dict from a genome object
Parameters
----------
genome : funcgenom.Genome()
a genome as from funcgenom
*dataset_names : str
names for the datasets represented by the genome
Returns
-------
dict
a dictionary whose values are tables containing the coordinates and
allele counts
"""
return {
'coordinates': dict(
zip(
('chr', 'pos'),
(
list(z) for z in zip(
*((v.chromosome, v.position) for v in genome.variants())
)
)
)
),
**{
dataset_name: dict(
zip(
('coverage', 'ref_count'),
(
list(z) for z in zip(
*(
(
v.traits.get(dataset_name, {}).get(
'coverage'
),
v.traits.get(dataset_name, {}).get(
'ref_count'
)
)
for v in genome.variants()
)
)
)
)
)
for dataset_name in dataset_names
}
}
|
754a22084a20c90c37387bee993ca06d7347c340
| 544,394
|
import pyarrow.cuda as cuda
def pyarrow_cuda_buffer(cp_arr):
"""Return pyarrow.cuda.CudaBuffer view of cupy.ndarray.
"""
ctx = cuda.Context(cp_arr.data.device.id)
return ctx.foreign_buffer(cp_arr.data.ptr, cp_arr.nbytes)
|
ff3a774a525588a1210931f1d800d80a4d77158a
| 226,092
|