content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import math
def position_check(inlat, inlon):
    """
    Validate a latitude/longitude pair against the bounds given in the
    ICOADS documentation: latitude in [-90, 90], longitude in [-180, 360].

    :param inlat: latitude
    :param inlon: longitude
    :type inlat: float
    :type inlon: float
    :return: 1 if either latitude or longitude is out of bounds, 0 otherwise
    :return type: integer
    """
    # Reject missing or NaN coordinates outright.
    assert inlat is not None and not math.isnan(inlat)
    assert inlon is not None and not math.isnan(inlon)
    lat_invalid = inlat < -90 or inlat > 90
    lon_invalid = inlon < -180 or inlon > 360
    result = 1 if (lat_invalid or lon_invalid) else 0
    assert result in (0, 1)
    return result
|
7f9e6d92667cd81ad02b034fcba231fd4763f966
| 10,669
|
def normalize_email(email):
    """
    Normalize an email address.

    Steps:
      1. Strip leading and trailing whitespace.
      2. Lowercase all ASCII characters.
      3. For gmail.com addresses only, in the username part:
         - remove every period ('.', ASCII 46),
           e.g. jane.doe@gmail.com -> janedoe@gmail.com
         - remove a plus sign ('+', ASCII 43) and everything after it,
           e.g. janedoe+home@gmail.com -> janedoe@gmail.com
    """
    cleaned = email.strip().lower()
    user, domain = cleaned.split("@")
    if domain == "gmail.com":
        # Truncate at the first '+', then drop the dots from what remains.
        base = user.partition("+")[0]
        cleaned = base.replace(".", "") + "@" + domain
    return cleaned
|
e128952a38cd699dffa55de0e486ee6bb245697a
| 10,670
|
def urljoin(url, suffix=""):
    """
    Join a base URL and a suffix with exactly one '/' between them.

    Example:
        "https://google.com/", "/"    => "https://google.com/"
        "https://google.com", "/"     => "https://google.com/"
        "https://google.com", "api"   => "https://google.com/api"
        "https://google.com", "/api"  => "https://google.com/api"
        "https://google.com/", "api"  => "https://google.com/api"
        "https://google.com/", "/api" => "https://google.com/api"

    :type url: ``string``
    :param url: URL string (required)
    :type suffix: ``string``
    :param suffix: the second part of the url
    :rtype: ``string``
    :return: Full joined url
    """
    # Ensure the base ends with a single '/'; [-1:] slicing also handled
    # the empty string, and endswith does too.
    if not url.endswith("/"):
        url = url + "/"
    # Drop a leading '/' on the suffix so we never double the separator.
    if suffix.startswith("/"):
        suffix = suffix[1:]
    # The original had a second, unreachable duplicate `return` here; removed.
    return url + suffix
|
cd8a81d7b427678330d1258fa5644f9d4cf631a0
| 10,671
|
import os
import json
def read_credentials():
    """Load and return the user's credentials as parsed JSON.

    The credentials are read from a ``credentials.json`` file located in
    the same directory as this module.
    """
    credentials_path = os.path.join(os.path.dirname(__file__), "credentials.json")
    with open(credentials_path) as handle:
        return json.load(handle)
|
52c64e487f123a07f30b87509aec07655d766526
| 10,672
|
def construct_dataset(df, past_lags, future_intervals):
    """Construct a demand dataset with lagged and future demand columns.

    :param df: DataFrame of demand values, one column per region
        (NOTE(review): set_index below assumes the index carries 'year'
        and 'week' levels — confirm against callers)
    :param past_lags: number of past intervals to add as lag columns
    :param future_intervals: number of future intervals to add as columns
    :return: DataFrame with columns lag_0..lag_{past_lags} and
        future_1..future_{future_intervals}, rows with NaNs dropped
    """
    # Total NEM demand for a given week: sum across all columns
    df_t = df.sum(axis=1).rename('total_demand')
    # Add an interval ID while keeping (year, week) in the index
    df_t = df_t.reset_index().rename_axis('interval').set_index(['year', 'week'], append=True)
    # Past demand: lag_i holds demand i intervals ago (positive shift)
    for i in range(1, past_lags + 1):
        df_t[f'lag_{i}'] = df_t['total_demand'].shift(i)
    # Future demand: future_i holds demand i intervals ahead (negative shift)
    for i in range(1, future_intervals + 1):
        df_t[f'future_{i}'] = df_t['total_demand'].shift(-i)
    # Rename total_demand to lag_0 (the current interval)
    total = df_t.rename(columns={'total_demand': 'lag_0'})
    # Drop rows made incomplete by the shifts at the series boundaries
    total = total.dropna()
    return total
|
ada01368eb23b01ea9c13f11042088818d6dab81
| 10,673
|
def keyphrase_label_from(candidate_span, element, generic_label=True):
    """Return the keyphrase label for a candidate span of a dataset element.

    :param candidate_span: span dict, possibly carrying a "keyphrase-id"
    :param element: dataset element, possibly carrying a "keyphrases" map
    :param generic_label: when True, matched spans are labelled "KEYPHRASE";
        otherwise the element's specific "keyphrase-label" is returned
    :return: "NON-KEYPHRASE" when the span does not match a known keyphrase
    """
    matches = (
        "keyphrases" in element
        and "keyphrase-id" in candidate_span
        and candidate_span["keyphrase-id"] in element["keyphrases"]
    )
    if not matches:
        return "NON-KEYPHRASE"
    if generic_label:
        return "KEYPHRASE"
    return element["keyphrases"][candidate_span["keyphrase-id"]]["keyphrase-label"]
|
d9f6e4a0697c441f6b597eaf0695cbdc1cc81d61
| 10,675
|
def score_string(lm, s, k, c):
    """Score string ``s`` under language model ``lm``.

    k: UID exponent; c: per-character string cost.
    """
    uid_score = lm.score_string_UID(s, k)
    length_cost = c * len(s)
    return uid_score + length_cost
|
35533d52ec7f1c65f9b49c38b51d52c9fe8c0ef0
| 10,676
|
def get_dot_file_path(gname):
    """
    Return the path of graph ``gname``'s dot file inside the dot_atlas
    directory.

    Parameters
    ----------
    gname : str

    Returns
    -------
    str
    """
    return f"dot_atlas/good_bad_trols_{gname}.dot"
|
4ead9a2c3656718a8c088e879742962a9782ef02
| 10,677
|
def _invert_signs(signs):
""" Shall we invert signs?
Invert if first (most probable) term is negative.
"""
return signs[0] < 0
|
4135340cfbeb4fce67513a160b63304a3199cf1a
| 10,678
|
def parse_directive(source_text, directive):
    """
    <Purpose>
      Search the given source text for occurrences of a pragma directive.
    <Arguments>
      source_text: The source in which we are searching for a pragma directive.
      directive: The pragma directive we are searching for.
    <Exceptions>
      None
    <Side Effects>
      None
    <Returns>
      All relevant information for the specified directive:
      [(Directive, Type, Argument), ...]
    """
    prefix = '#' + directive
    found = []
    for raw_line in source_text.splitlines():
        if not raw_line.startswith(prefix):
            continue
        remainder = raw_line[len(prefix):].strip()
        pragma_type, _, argument = remainder.partition(' ')
        found.append((directive, pragma_type, argument))
    return found
|
1fb538a75a530ff5c9d368dcc3601be0419fc150
| 10,680
|
def safe_xml_tag_name(
    name: str, numeric_prefix: str = "tag-", empty_fallback: str = "empty-tag"
) -> str:
    """
    Build a safe xml tag name by replacing invalid characters with a dash.

    :param name: The name that must be converted to a safe xml tag name.
    :param numeric_prefix: An xml tag name can't start with a number, so if that
        is the case, then this value will be prepended.
    :param empty_fallback: An xml tag name can't be empty, so if that is the
        case, then this fallback value will be returned.
    :return: A safe name that can be used as xml tag.
    """
    # Non-alphanumeric characters become dashes.
    candidate = "".join(ch if ch.isalnum() else "-" for ch in name)
    # Collapse dash runs to a single dash.
    while "--" in candidate:
        candidate = candidate.replace("--", "-")
    # After collapsing, at most one dash can remain at each end; trim them.
    candidate = candidate.strip("-")
    if candidate and candidate[0].isnumeric():
        candidate = f"{numeric_prefix}{candidate}"
    return candidate if candidate else empty_fallback
|
fea34367fbc7f2a4b9dbe23d11c70ecb75dad3da
| 10,681
|
def list_multipart_upload(resource, bucket_name, prefix=""):
    """List the in-progress multipart uploads for a bucket.

    :param resource: boto3-style resource exposing ``meta.client``
    :param bucket_name: bucket to query
    :param prefix: optional key prefix filter
    :return: dict with the "Uploads" and "CommonPrefixes" response entries
    """
    response = resource.meta.client.list_multipart_uploads(
        Bucket=bucket_name, Prefix=prefix
    )
    return {
        "Uploads": response.get("Uploads"),
        "CommonPrefixes": response.get("CommonPrefixes"),
    }
|
669b852a8c38acc58f1a4dad833a03d011bb4e14
| 10,683
|
def tcl_prep_otaver(ota=None):
    """
    Prepare the (mode, fvver) pair for an OTA versus full update check.

    :param ota: The starting version if OTA, None if not. Default is None.
    :type ota: str

    :return: tuple of (mode, fvver): mode 2 with the given version for OTA,
        mode 4 with the "AAA000" base version for a full check.
    """
    if ota is None:
        return 4, "AAA000"
    return 2, ota
|
b42c9af8abd9b6c2b361d906737429acf967182b
| 10,684
|
def pessoa(texto):
    """
    Prompt repeatedly for a person's name until a non-empty, non-numeric
    answer is given.

    :param texto: prompt text shown to the user
    :return: the validated answer, stripped and upper-cased
    """
    while True:
        p = input(texto).strip().upper()
        if p.isdigit() or p == '':
            # Reject empty or purely numeric answers and ask again.
            print('Erro, digite um nome válido por favor.')
        else:
            return p
|
e07755638c291a726b7681dbc013a43b21a3890b
| 10,685
|
import numpy
def calculate_weighted_statistics(values, weights, statistics):
    """
    Calculates weighted statistics

    :param values: pixel values (numpy masked array — the MEAN branch reads
        ``values.mask``)
    :params weights: weight of each pixel, where 0 > weight >= 1 (areas of 0
        weight should be masked out first); also a masked array. Weights can be
        thought of as the proportion of each pixel occupied by some feature of
        interest.
    :param statistics: list of statistics to be calculated.
        Currently supports: MEAN, STD
    :return: a list with each of the results, in the order the original
        statistic was requested
    :raises ValueError: if an unsupported statistic is requested
    """
    supported_statistics = {"MEAN", "STD"}
    unsupported_statistics = set(statistics).difference(supported_statistics)
    if unsupported_statistics:
        raise ValueError("Unsupported statistics: %s" % unsupported_statistics)
    results = []
    # Element-wise product; masked cells propagate into the products.
    weighted_values = values * weights
    for statistic in statistics:
        if statistic == "MEAN":
            # must account for the mask of both values and weights in
            # calculating the denominator sum (union of the two masks)
            results.append(weighted_values.sum() / numpy.ma.masked_array(weights, mask=weights.mask + values.mask).sum())
        elif statistic == "STD":
            results.append(weighted_values.std())
    return results
|
502869b5719aa0db59f16f61ab0eaa1c21a7128a
| 10,686
|
def check_sup_x(supp, row_num, z, c):
    """Clear unusable candidate numbers inside one 3x3 box.

    For every cell of the box whose top-left corner is (z*3, c*3), set
    supp[row][col][k] to 0 for each candidate k in ``row_num``.

    :param supp: 3-D candidate table indexed as supp[row][col][number]
    :param row_num: iterable of candidate numbers to clear
    :param z: box row index
    :param c: box column index
    :return: the mutated ``supp`` table
    """
    row_base, col_base = z * 3, c * 3
    for row in range(row_base, row_base + 3):
        for col in range(col_base, col_base + 3):
            for number in row_num:
                supp[row][col][number] = 0
    return supp
|
f776dd426a11ac806a7c025dd6e88c244f988208
| 10,687
|
def get_name_from_key(key) -> str:
    """Given a dask collection's key, extract the collection name.

    Parameters
    ----------
    key: string or tuple
        Dask collection's key, which must be either a single string or a tuple
        whose first element is a string (commonly referred to as a
        collection's 'name').

    Examples
    --------
    >>> get_name_from_key("foo")
    'foo'
    >>> get_name_from_key(("foo-123", 1, 2))
    'foo-123'
    """
    if isinstance(key, str):
        return key
    if isinstance(key, tuple) and key and isinstance(key[0], str):
        return key[0]
    raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
|
8a5b46a85000325932c043eb4a94864fef2d6dd4
| 10,688
|
def convert_init_dict(init_dict):
    """Convert an init_dict from an old version of pyleecan to the current one.

    :param init_dict: machine description dict (mutated in place)
    :return: the updated init_dict
    :raises AssertionError: if the old-style slot holds more than one magnet
    """
    # V 1.0.4 => 1.1.0: New definition for LamSlotMag + SlotMag
    if init_dict["__class__"] == "MachineSIPMSM" and "magnet" not in init_dict["rotor"]:
        print("Old machine version detected, Updating the LamSlotMag object")
        # Moving the magnet (assume only one magnet)
        assert (
            len(init_dict["rotor"]["slot"]["magnet"]) == 1
        ), "LamSlotMag with more than one magnet per pole is no longer available (for now)"
        init_dict["rotor"]["magnet"] = init_dict["rotor"]["slot"]["magnet"][0]
        init_dict["rotor"]["slot"].pop("magnet")
        # Update the slot with the magnet parameters: new slot class name is
        # "SlotM" + the last two characters of the old magnet class name
        init_dict["rotor"]["slot"]["__class__"] = (
            "SlotM" + init_dict["rotor"]["magnet"]["__class__"][-2:]
        )
        init_dict["rotor"]["slot"]["Wmag"] = init_dict["rotor"]["magnet"]["Wmag"]
        init_dict["rotor"]["slot"]["Hmag"] = init_dict["rotor"]["magnet"]["Hmag"]
        # Rtop only exists on curved magnet types; copy it when present
        if "Rtop" in init_dict["rotor"]["magnet"]:
            init_dict["rotor"]["slot"]["Rtopm"] = init_dict["rotor"]["magnet"]["Rtop"]
        # Downgrade the magnet to the generic Magnet class
        init_dict["rotor"]["magnet"]["__class__"] = "Magnet"
    return init_dict
|
d73718c77b1909b150e254aa1ef4eed7fbf33c35
| 10,690
|
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:
        proc_data: (Dictionary) raw structured data to process

    Returns:
        Dictionary. Structured data with the following schema:
        {
          "uid":    {"id": integer, "name": string},
          "gid":    {"id": integer, "name": string},
          "groups": [{"id": integer, "name": string}, ...],
          "context": {"user": string, "role": string,
                      "type": string, "level": string}
        }
    """
    def _to_int(value):
        # Coerce to int; unparseable strings become None (same contract as
        # the original triplicated try/except blocks).
        try:
            return int(value)
        except ValueError:
            return None

    # uid/gid: convert the 'id' entry when the section is present.
    for section in ('uid', 'gid'):
        if section in proc_data and 'id' in proc_data[section]:
            proc_data[section]['id'] = _to_int(proc_data[section]['id'])
    # groups: convert each group's 'id' entry.
    if 'groups' in proc_data:
        for group in proc_data['groups']:
            if 'id' in group:
                group['id'] = _to_int(group['id'])
    return proc_data
|
b97d35b93ea08d6adcd69e0fa84c9b59be8d4419
| 10,691
|
def get_tag_type(tagtype, pairs):
    """
    Given a list of (word, tag) pairs, return the words whose tag starts
    with ``tagtype`` (e.g. 'NN', 'JJ', 'VB').
    """
    selected = []
    for word, tag in pairs:
        if tag.startswith(tagtype):
            selected.append(word)
    return selected
|
383515704788e0fd6bcfd7d7f21e77be18397163
| 10,692
|
def read_data_from(file_: str) -> list:
    """Read boarding pass data from ``file_``, one entry per line."""
    with open(file_, "r") as handle:
        contents = handle.read()
    return contents.splitlines()
|
7315c4c284cdd2e9e1b66776c26eccabe13acdb4
| 10,693
|
import ast
def get_auxiliary_name(node: ast.AST, aux_symbol_id: str) -> str:
    """
    Generate a unique name for an auxiliary variable.

    :param node: the ast node that originates the auxiliary symbol
    :param aux_symbol_id: the id name of the auxiliary symbol
    :return: the unique name of the symbol (symbol id + the node's identity)
    """
    return f"{aux_symbol_id}_{id(node)}"
|
d67fc8d70553265a7d5345e97877e07caf36e19a
| 10,695
|
def search_for_pod_name(details: dict, operator_id: str):
    """Get operator pod name.

    Args:
        details (dict): workflow manifest from pipeline runtime
        operator_id (str): operator id

    Returns:
        dict: name, status and message of the pod, or None when no node
        with the given displayName exists.
    """
    try:
        nodes = details['status']['nodes']
    except KeyError:
        return None
    for node in nodes.values():
        if node.get('displayName') == operator_id:
            return {
                'name': node['id'],
                'status': node['phase'],
                # .get(): a node without 'message' no longer raises KeyError
                # (which used to silently discard the found match).
                'message': node.get('message'),
            }
    return None
|
cc5bc532a1875145452fbe71cca54f840257c90d
| 10,696
|
def readxmol(ifile, elem, xyz):
    """
    Read an xmol (XYZ) file from an open file handle.

    :param ifile: open file-like object positioned at the start of the file
    :param elem: output list; element symbols are appended to it
    :param xyz: output list; [x, y, z] float coordinate triples are appended
    :return: the number of atoms declared on the first line
    """
    lines = ifile.readlines()
    nat = int(lines[0])
    # lines[1] is the title/comment line; it is intentionally skipped
    # (the original bound it to an unused variable).
    for record in lines[2:]:
        # Renamed from 'type', which shadowed the builtin.
        symbol, x, y, z = record.split()
        xyz.append([float(x), float(y), float(z)])
        elem.append(symbol)
    return nat
|
62be4300fcf30f3b2dee329418352230639ef7a1
| 10,701
|
def dicts_to_matched_tuples(dict1, dict2):
    """Convert a pair of dicts to a list of matched value tuples so that
    their elements can be compared.

    :raises RuntimeError: if the sets of keys in both dicts are not the same.
    """
    try:
        # On Python 3, dict.keys() + dict.keys() raises TypeError (dict views
        # do not support '+'), so the original always crashed; the set union
        # operator gives the intended combined key set.
        return [(dict1[k], dict2[k]) for k in set(dict1) | set(dict2)]
    except KeyError:
        raise RuntimeError('Dictionaries are not comparable.')
|
496dba21da3766db49934761fd8ec142aed0f1ab
| 10,702
|
def key(profile):
    """Return the profile's last name, lower-cased (sort-key helper)."""
    last_name = profile["name"].split(' ')[-1]
    return last_name.lower()
|
dd778619f601213f3cbae3504277fe6ee21ba3cd
| 10,703
|
def solution(a: list, k: int) -> list:
    """
    >>> solution([], 0)
    []
    >>> solution([], 100)
    []
    >>> solution([1] * 100, 100) == [1] * 100
    True
    >>> solution([1, 3], 1)
    [3, 1]
    >>> solution([1, 3], 2)
    [1, 3]

    :param a: An array of integers
    :param k: Number of rotations
    :return: @a rotated @k times
    """
    # An empty array rotates to itself (the original reached the same
    # outcome via a caught ZeroDivisionError on k % 0).
    if not a:
        return a
    shift = k % len(a)
    return a[shift:] + a[:shift]
|
f1ff42df2ec1181357732ea7910b8994099dfa65
| 10,704
|
import logging
def _get_logger():
"""
Generate a logger with a stream handler.
"""
logger = logging.getLogger('epc')
hndlr = logging.StreamHandler()
hndlr.setLevel(logging.INFO)
hndlr.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logger.addHandler(hndlr)
return logger
|
8c96d2395e1d907a389346e5e3b94f41c0b62fe3
| 10,705
|
import inspect
def is_mod_class(mod, cls):
    """Return True when ``cls`` is a class that was declared in module ``mod``.

    Args:
        mod: the module
        cls: the class
    """
    if not inspect.isclass(cls):
        return False
    return inspect.getmodule(cls) == mod
|
7a9b228995d2bf46467ef75823a1aad26d16df0e
| 10,706
|
from datetime import datetime
def getDateTime(timestamp):
    """
    Convert a millisecond timestamp into a datetime.

    :param timestamp: string (or integer) value in milliseconds
    :return: datetime value (local time)
    """
    seconds = int(timestamp) / 1e3
    return datetime.fromtimestamp(seconds)
|
2197806bf7372305cd048295c8c63a0269625262
| 10,707
|
def is_heavy_usage_item_scription(item_description):
    """
    CNN1-HeavyUsage:m4.2xlarge

    True when the description consists of exactly two ';'-separated parts.

    :param item_description:
    :return:
    """
    parts = item_description.split(";")
    return len(parts) == 2
|
dd67d8fc192d663bf9e09c4b6325ad3afe09fd0b
| 10,708
|
def swapWordCount(wordToFreq):
    """
    Invert a word->count mapping.

    wordToFreq: the dict linking word to count
    return freqToWord: dict linking each count to the sorted list of words
    with that count
    """
    freqToWord = {}
    for word, freq in wordToFreq.items():
        freqToWord.setdefault(freq, []).append(word)
    return {freq: sorted(words) for freq, words in freqToWord.items()}
|
a1569f20a11303e0aa986a2ae5a1377f0bd1b14a
| 10,709
|
def _prepare_quote(quote, author, max_len=78):
    """Word-wrap a quote for the fancy prompt.

    :param quote: the quote text
    :param author: attribution appended right-aligned on a final line
    :param max_len: total line budget; 6 columns are reserved for decoration
    :return: the list of ' | '-prefixed lines (note: a list, not a string)
    """
    quote = quote.split(' ')
    # Reserve room for the ' | ' decoration and padding.
    max_len -= 6
    lines = []
    cur_line = []
    def _len(line):
        # Rendered width of the words joined by single spaces.
        return sum(len(elt) for elt in line) + len(line) - 1
    while quote:
        # Greedily fill the current line; flush it when the next word
        # would overflow (the word stays in `quote` for the next line).
        if not cur_line or (_len(cur_line) + len(quote[0]) - 1 <= max_len):
            cur_line.append(quote.pop(0))
            continue
        lines.append(' | %s' % ' '.join(cur_line))
        cur_line = []
    # Flush the last partially filled line.
    if cur_line:
        lines.append(' | %s' % ' '.join(cur_line))
        cur_line = []
    # Right-align the author attribution on its own line.
    lines.append(' | %s-- %s' % (" " * (max_len - len(author) - 5), author))
    return lines
|
7669af2c1dd5de482740944937229f5e87c527b5
| 10,710
|
def bt2_out():
    """Static equilibrium results from braced tower 2d.

    :return: dict with the reference solution: node coordinates ("xyz"),
        member forces ("force"), member lengths ("length") and support
        residuals ("residual"), keyed by node index / node-pair tuples.
    """
    output = {}
    # Node coordinates, indexed by node number.
    output["xyz"] = {0: [0.11891271935545733, 0.04623304043571308, 0.0],
                     1: [-0.14550216351451895, 1.0106420665842952, 0.0],
                     2: [0.0, 2.0, 0.0],
                     3: [1.5829003695589805, 0.11137412111377487, 0.0],
                     4: [1.1455022100524879, 1.010642073428508, 0.0],
                     5: [1.0, 2.0, 0.0]}
    # Member axial forces, keyed by (node_i, node_j); sign gives tension/compression.
    output["force"] = {(0, 1): -1.5154917766302523,
                       (1, 2): -1.6714301665432025,
                       (1, 4): -1.0,
                       (1, 5): 1.0,
                       (1, 3): 1.0,
                       (2, 5): -1.0,
                       (2, 4): 1.0,
                       (3, 4): -1.1120156232900127,
                       (4, 5): -1.6714302901903129}
    # Member lengths, same keys as "force".
    output["length"] = {(0, 1): 1.0,
                        (1, 2): 1.0,
                        (1, 4): 1.2910043735670067,
                        (1, 5): 1.5136063976572767,
                        (1, 3): 1.9483475444811331,
                        (2, 5): 1.0,
                        (2, 4): 1.5136064284036903,
                        (3, 4): 1.0,
                        (4, 5): 1.0}
    # Residual forces at the supported nodes (0 and 3).
    output["residual"] = {0: [-0.4007185806081004, 1.4615539484361662, -0.0],
                          3: [0.40071844900288345, 0.5384458270605735, -0.0]}
    # [-0.48639358696951673, 1.0000000124980013, -0.0]
    return output
|
1f5e1ebaea1abb5bd6072f29dca68598a0366905
| 10,713
|
def _get_mass_dict(factor=1000000000, type=int):
    """
    Return a Dictionary containing the masses of each aminoacid.

    We explicitly convert them by a factor of 1 000 000 000 (default) into
    integers. The values are taken from:
    https://proteomicsresource.washington.edu/protocols06/masses.php

    :param factor: multiplier applied to each mass before conversion
    :param type: conversion callable applied to each scaled mass (note: this
        parameter name shadows the builtin ``type`` inside this function, but
        it is part of the public keyword interface and is kept)
    :return: dict mapping one-letter amino-acid code to (MONO_MASS, AVG_MASS)
    """
    return dict(  # In format: AA = (MONO_MASS, AVG_MASS)
        G=(type(57.021463735 * factor), type(57.05132 * factor)),
        A=(type(71.037113805 * factor), type(71.0779 * factor)),
        S=(type(87.032028435 * factor), type(87.0773 * factor)),
        P=(type(97.052763875 * factor), type(97.11518 * factor)),
        V=(type(99.068413945 * factor), type(99.13106 * factor)),
        T=(type(101.047678505 * factor), type(101.10388 * factor)),
        C=(type(103.009184505 * factor), type(103.1429 * factor)),
        L=(type(113.084064015 * factor), type(113.15764 * factor)),
        I=(type(113.084064015 * factor), type(113.15764 * factor)),
        N=(type(114.042927470 * factor), type(114.10264 * factor)),
        D=(type(115.026943065 * factor), type(115.0874 * factor)),
        Q=(type(128.058577540 * factor), type(128.12922 * factor)),
        K=(type(128.094963050 * factor), type(128.17228 * factor)),
        E=(type(129.042593135 * factor), type(129.11398 * factor)),
        M=(type(131.040484645 * factor), type(131.19606 * factor)),
        H=(type(137.058911875 * factor), type(137.13928 * factor)),
        F=(type(147.068413945 * factor), type(147.17386 * factor)),
        U=(type(150.953633405 * factor), type(150.3079 * factor)),
        R=(type(156.101111050 * factor), type(156.18568 * factor)),
        Y=(type(163.063328575 * factor), type(163.17326 * factor)),
        W=(type(186.079312980 * factor), type(186.2099 * factor)),
        O=(type(237.147726925 * factor), type(237.29816 * factor)),
        # Special Aminoacids
        J=(type(113.084064015 * factor), type(113.1594 * factor)),
        X=(type(0.0 * factor), type(0.0 * factor)),  # Unknown Amino Acid
        Z=(type(128.55059 * factor), type(128.6231 * factor)),
        B=(type(114.53495 * factor), type(114.5962 * factor)),
        # Custom start and end points
        __start__=(type(0), type(0)),
        __end__=(type(0), type(0)),
    )
|
fabdf445765acb1bde082ba63df9b22b06669ab9
| 10,714
|
import hmac
def is_authenticated(request, secret):
    """
    Verify whether the request carries a valid Slack signature.

    Args:
        request (tornado.httputil.HTTPRequest): The request
        secret (str): The secret to use for authentication
    """
    # See https://api.slack.com/authentication/verifying-requests-from-slack for more info
    timestamp = request.headers["X-Slack-Request-Timestamp"]
    signed_base = f"v0:{timestamp}:{request.body.decode()}".encode()
    mac = hmac.new(key=secret.encode(), msg=signed_base, digestmod="sha256")
    expected = ("v0=" + mac.hexdigest()).encode()
    provided = request.headers["X-Slack-Signature"].encode()
    # Constant-time comparison to avoid timing attacks.
    return hmac.compare_digest(expected, provided)
|
4d8915018f5d4e97934a581a79bb935533714817
| 10,716
|
def chi2_fun(theta, parameters_to_fit, event):
    """
    Calculate chi2 for given values of parameters.

    Keywords :
        theta: *np.ndarray*
            Vector of parameter values, e.g., `np.array([5380., 0.5, 20.])`.
        parameters_to_fit: *list* of *str*
            Names of the parameters corresponding to theta, e.g.,
            `['t_0', 'u_0', 't_E']`.
        event: *MulensModel.Event*
            Event which has datasets for which chi2 will be calculated.

    Returns :
        chi2: *float*
            Chi2 value for given model parameters.
    """
    # Push the proposed values into event.model.parameters first ...
    for name, value in zip(parameters_to_fit, theta):
        setattr(event.model.parameters, name, value)
    # ... after which the chi2 follows directly from the event.
    return event.get_chi2()
|
14f74f3cf64770dc1cb7e335880f445eb75ca007
| 10,719
|
def link(content, target):
    """Corresponds to ``[content](target)`` in the markup.

    :param content: HTML that will go inside the tags.
    :param target: a full URL, or a local ``filename.html#subtitle`` URL
    """
    return '<a href="{0}">{1}</a>'.format(target, content)
|
c0355f78db31edccf7e904b3696a169980fa796b
| 10,720
|
import typing
def calculate_default_layout_uvs(
texture_size: typing.Tuple[int, int],
box_size: typing.Tuple[int, int, int],
offset: typing.Tuple[int, int],
):
"""
Util method for calculating uv's
Cache result whenever possible!
WARNING: currently may not work correctly
:param texture_size: the size of the texture, a simple factor for the result
:param box_size: the sizes of the box
:param offset: an offset of the texture origin
:return: the uv's, to pass to e.g. box models
"""
sx, sy = texture_size
dx, dy = offset
x, y, z = box_size
x -= 1
y -= 1
z -= 1
return list(
map(
lambda e: (
(e[0] + dx) / sx,
(e[3] + dy) / sy,
(e[2] + dx) / sx,
(e[1] + dy) / sy,
),
[
(x + z, y, x + 2 * z, y + x),
(x, y, x + z, y + x),
(x, -1, x + z, y),
(x + 2 * z, -1, 2 * x + 2 * z, y),
(0, -1, z, y),
(x + z, -1, x + 2 * z, y),
],
)
)
|
9a9c49711c8dd9ec19e8fd3830e951778aa28737
| 10,724
|
def get_version_details(path):
    """Parse a version file into a dict.

    :param path: path to version file with lines like ``__version__ = '1.0'``
    :return: mapping of dunder-stripped names to their unquoted values
    """
    details = {}
    with open(path, "r") as reader:
        for line in reader.readlines():
            name = line.split(" = ")[0]
            value = line.split(" = ")[1]
            details[name.replace("__", "")] = value.strip().replace("'", "")
    return details
|
6ea7019e4e39b5c315e085369c6ab2bd6729d6bb
| 10,727
|
def size_to_bytes(size, largeur=6):
    """ Convert a size in bytes to a bytes string with a K, M, G or T suffix."""
    thresholds = (
        (1073741824 * 1024, b"%*.2fT", 1073741824. * 1024.),
        (1073741824, b"%*.2fG", 1073741824.),
        (1048576, b"%*.2fM", 1048576.),
        (1024, b"%*.2fK", 1024.),
    )
    for limit, fmt, divisor in thresholds:
        if size > limit:
            return fmt % (largeur, size / divisor)
    # Below 1 KiB: plain byte count, right-aligned in `largeur` columns.
    return b"%*dB" % (largeur, size)
|
f05e74d89b710936a8253f13b9e6d804f9004b6b
| 10,728
|
import csv
def csv_writer(output_file):
    """
    @brief: Build a CSV writer using ';' as delimiter with all fields quoted
    @param output_file: Output file handler
    @return: CSV writer
    """
    return csv.writer(
        output_file,
        delimiter=';',
        quoting=csv.QUOTE_ALL,
    )
|
70a5d6dca84ef2b60c3fb742a38fd6b27724ecfb
| 10,729
|
import configparser
def repo_default_config():
    """Build and return the default repository config (Microsoft INI format)."""
    config = configparser.ConfigParser()
    config.add_section("core")
    defaults = (
        ("repositoryformatversion", "0"),
        ("filemode", "false"),
        ("bare", "false"),
    )
    for option, value in defaults:
        config.set("core", option, value)
    return config
|
3b1071caaa0efd967cb075c492544435449e1be2
| 10,730
|
import re
def extractDirective(lineStr: str):
    """Extract a dot-prefixed directive (e.g. ``.section``) from a line.

    :param lineStr: the line to scan
    :return: the matched directive string, or None when the line has none
    """
    # Fixed char class: the original '[a-zA-z]' also matched the ASCII
    # characters [ \ ] ^ _ ` that sit between 'Z' and 'a'.
    match = re.search(r'\.[a-zA-Z][a-zA-Z\d_]+', lineStr)
    return match.group() if match else None
|
390331845733d4f6d5f2c4c8318cbac693730026
| 10,731
|
def make_connect_data(one_data):
    """
    Join a row of values into a single '&#@'-delimited string.

    None values are replaced by the empty string; every element is
    str()-converted before joining.

    :param one_data: iterable of values to join
    :return: the joined string
    """
    # 'is None' instead of '== None': identity check is the correct idiom
    # and is safe against objects overriding __eq__.
    cleaned = ['' if elem is None else elem for elem in one_data]
    return "&#@".join(str(elem) for elem in cleaned)
|
58a64cc50778d419a292e1c6ba5c55cd1030de47
| 10,733
|
import requests
def get_token(access_key, access_secret,
              auth_url="https://deviceserver.creatordev.io/oauth/token"):
    """ Gets device server access token.

    :param access_key: OAuth username credential
    :param access_secret: OAuth password credential
    :param auth_url: token endpoint to POST against
    :return: dict of request headers carrying the Bearer token, or None when
        the response has no parseable access token (ValueError path below)
    """
    try:
        # POST Body Payload for Auth
        payload = {
            'grant_type': 'password',
            'username': access_key,
            'password': access_secret
        }
        # POST Request Access Token
        auth_response = requests.post(auth_url, data=payload)
        # Access Token (raises ValueError if the body is not valid JSON)
        token = auth_response.json()['access_token']
        # Auth Bearer, to send on request header
        bearer = "Bearer" + " "+str(token)
        # GET Request Header
        headers = {
            'Content-type': 'application/json',
            'Accept': 'application/json',
            'Authorization': bearer
        }
        return headers
    except ValueError:
        # Bad credentials produce a non-JSON / token-less response;
        # the function then falls through and returns None.
        print('Invalid key and secret !')
|
bc401bf6ff441aa17311d12137250150aadeb686
| 10,734
|
import os
def load_kraken_db_metadata(kraken2_db):
    """
    Load NCBI taxonomic name mappings to be able to convert taxids into
    taxonomic strings.

    Args:
        kraken2_db (str): path to kraken2 standard database location

    Returns:
        names_map (dict[str:str]): the scientific name for each taxid node
        ranks_map (dict[str:str]): the parent taxid for each taxid
    """
    print("Loading NCBI node names")
    names_path = os.path.join(kraken2_db, "taxonomy", "names.dmp")
    names_map = dict()
    # Renamed handle from 'input' (shadowed the builtin).
    with open(names_path) as names_file:
        for line in names_file:
            cut = line.rstrip().split("\t")
            taxid = cut[0]
            name = cut[2]
            # Renamed from 'type' (shadowed the builtin); keep only the
            # canonical scientific name for each taxid.
            name_type = cut[6]
            if name_type == "scientific name":
                names_map[taxid] = name
    print("Loading NCBI taxonomic ranks")
    ranks_path = os.path.join(kraken2_db, "taxonomy", "nodes.dmp")
    ranks_map = dict()
    with open(ranks_path) as nodes_file:
        # Iterate the handle directly instead of readlines(): no need to
        # materialize the whole (large) nodes.dmp in memory.
        for line in nodes_file:
            cut = line.rstrip().split("\t")
            taxid = cut[0]
            parent_taxid = cut[2]
            # NOTE: despite the name, ranks_map stores the parent taxid,
            # not the rank string (cut[4]), matching the original behavior.
            ranks_map[taxid] = parent_taxid
    return names_map, ranks_map
|
675c434eb7893b45866b7002c12c142cd0118ac6
| 10,736
|
def excess_entropy_fast(text: str, H_single, H_pair):
    """
    Calculate the excess entropy of a string in O(n) time.

    :param text: an input tokenized string
    :param H_single: a function that calculates H(i, x_i)
    :param H_pair: a function that calculates H(x_i | x_{i-1}) = H(i, x_{i-1}, x_i)
    :return: a float value equal to the excess entropy of the input string
    """
    total = 0
    # Positions 1..n-1: single-symbol entropy minus pairwise entropy with
    # the preceding symbol.
    for idx in range(1, len(text)):
        total += H_single(idx, text[idx]) - H_pair(idx, text[idx - 1], text[idx])
    return total
|
1e590f7577fa9b9185160eea26d3900476f56bf0
| 10,737
|
from typing import Set
import ast
def _h5attr2set(attr: str) -> Set[str]:
"""Convert an HDF5 attribute to a list of strings"""
if not attr or attr == "set()":
return set()
return ast.literal_eval(attr)
|
55aa07126efe42fa1f3437ce6206e72db58c7fd3
| 10,738
|
def velmodellayers_sdsu(ifile):
    """
    Count the number of layers in an SDSU-type velocity model file.

    The SDSU code needs the number of layers, defined as the file's line
    count minus two header lines.

    :param ifile: path to the SDSU velocity model file
    :return: number of layers, or 0 for files with fewer than 3 lines
    """
    # 'with' guarantees the handle is closed even if reading raises
    # (the original's explicit close() was skipped on error).
    with open(ifile, "r") as infile:
        lincount = sum(1 for _ in infile)
    return lincount - 2 if lincount >= 3 else 0
|
922f9443b3b30cbe58639ab51ab9f183205d0dec
| 10,739
|
from typing import Any
def to_qualified_name(obj: Any) -> str:
"""
Given an object, returns its fully-qualified name, meaning a string that represents its
Python import path
Args:
- obj (Any): an importable Python object
Returns:
- str: the qualified name
"""
return obj.__module__ + "." + obj.__qualname__
|
45824d1f84a96f254274e7fe40f2ed9546ccb346
| 10,740
|
def factorial(n):
    """Factorial function implementation (recursive); factorial(0) == 1."""
    if not n:
        return 1
    return n * factorial(n - 1)
|
3ebaa4cd6c38773e8c8a6e1c247e0163f0d7d50a
| 10,741
|
def get_index(search, names):
    """ Find the index of the entry in ``names`` (list of 'Key|Value'
    strings) whose key part equals ``search``; None when absent. """
    for position, entry in enumerate(names):
        key_part = entry.split('|')[0]
        if key_part == search:
            return position
    return None
|
fbfc6b71b75172e2980a604f53e602c9b3cb9a84
| 10,742
|
def separate_callback_data(data):
    """Split incoming callback data on ';' separators."""
    # str.split already returns a list; the original's identity
    # comprehension around it was a redundant copy.
    return data.split(";")
|
0cfef1abf910193a7ad8015862614f3659dd40d5
| 10,743
|
def _upgrading(version, current_version):
    """Return True when moving to ``version`` from ``current_version`` is an
    upgrade, comparing major.minor.patch numerically.

    NOTE: when the versions are component-wise equal but the string lists
    differ (e.g. a '-suffix' on the current patch), the loop falls through
    and the function implicitly returns None (falsy), not False.

    >>> _upgrading('0.9.2', '1.9.2')
    False
    >>> _upgrading('0.11.3', '0.11.2')
    True
    >>> _upgrading('0.10.2', '0.9.2')
    True
    >>> _upgrading('1.1.3', '1.1.4')
    False
    >>> _upgrading('1.1.1', '1.1.1')
    False
    >>> _upgrading('0.9.1000', '50.1.1')
    False
    >>> _upgrading('50.1.1', '0.9.1000')
    True
    """
    version_parts = version.split('.')
    c_version_parts = current_version.split('.')
    # Strip a pre-release/build suffix from the current patch part,
    # e.g. '2-beta' -> '2'.
    c_version_parts[2] = c_version_parts[2].split('-')[0]
    if c_version_parts == version_parts:
        return False
    # Compare major, then minor, then patch, numerically.
    for i in range(3):
        if int(version_parts[i]) > int(c_version_parts[i]):
            return True
        elif int(version_parts[i]) < int(c_version_parts[i]):
            return False
|
1ff478ed3c55687ddaa0392f6a071915750f8dfa
| 10,745
|
import argparse
def parse_args():
    """
    Build the command-line argument parser.

    :return: the configured argparse.ArgumentParser (caller invokes
        parse_args() on it)
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    # required arguments:
    required.add_argument("--matrix",
                          "-m",
                          dest="deeptoolsMatrix",
                          type=str,
                          metavar="STR",
                          help="deeptools matrix",
                          required=True)
    required.add_argument("--output",
                          "-o",
                          dest="outputMatrix",
                          type=str,
                          metavar="STR",
                          help="output matrix",
                          required=True)
    required.add_argument("--feature.tables",
                          "-t",
                          dest="tables",
                          nargs='+',
                          help="gene id tables or name based tables, tables "
                               "should be space-separated.",
                          required=True)
    # optional arguments
    optional.add_argument("--annotationFeature",
                          "-F",
                          dest="annotationFeature",
                          type=str,
                          help="annotation file can be filtered by a feature "
                               "such as gene, exon or transcript",
                          default=None)
    optional.add_argument("--filteredGenomeGtfOutputFile",
                          "-oa",
                          dest="annotationOutput",
                          type=str,
                          help="saving filtered annotation file if "
                               "--annotationFeature",
                          default=None)
    optional.add_argument("--genomeGtf",
                          "-g",
                          dest="annotation",
                          type=str,
                          metavar="STR",
                          help="genome annotation (gtf) to map peaks to closest gene. Will be filtered through '--annotationFeature'",
                          default=None)
    optional.add_argument("--featureNames",
                          "-f",
                          dest="Features",
                          nargs='+',
                          help="A list of features of interest from gene id "
                               "tables or name based tables",
                          default=["log2(FC)"])
    optional.add_argument("--featureIdColumn",
                          dest="idcolumn",
                          type=str,
                          help="name of the column includes ids/names",
                          default="GeneID")
    optional.add_argument("--referencePoint",
                          dest="referencePoint",
                          type=str,
                          help="If closest TSS or TES is needed, otherwise "
                               "closest gene body will be found",
                          default=None)
    optional.add_argument("--closestGenesOutput",
                          "-og",
                          dest="closestGenesOutput",
                          type=str,
                          help="A bed file to save the closest genes",
                          default=None)
    return parser
|
ded823885e43c3899d2ef5a61c54b0bbeb53c251
| 10,746
|
def add_x_to_plotting_options(plotting_options: dict, option_cat: str, x: str, defaultvalue):
    """
    Add a default value without overriding the caller's plotting_options:
    the default is only inserted when ``x`` is not already defined under
    ``option_cat``.
    """
    if plotting_options is None:
        plotting_options = {}
    category = plotting_options.setdefault(option_cat, {})
    # Only fill in the default when the caller did not set x already.
    if x not in category:
        category[x] = defaultvalue
    return plotting_options
|
48ff564e58b35b8dc38d529fb27188556401c81c
| 10,747
|
def divide(a, b):
    """
    Calculates the division of a by b.
    If the denominator equals 0, then the result is plus infinity.
    --------------------
    args:
        a (float): the numerator
        b (float): the denominator
    --------------------
    return:
        (float): the division of a by b (always +inf on division by zero,
        regardless of the sign of a)
    --------------------
    """
    try:
        return a / b
    except ZeroDivisionError:
        # Only division-by-zero maps to infinity; the original bare except
        # also silently masked unrelated errors such as TypeError.
        return float('Inf')
|
2e7f42ed3c92c6ea95798295030262b746e48653
| 10,748
|
def get_colors_for_class_ids(class_ids):
    """Map class ids to display colors.

    Only class id 1 has a color (red); other ids contribute nothing to
    the result list.
    """
    red = (.941, .204, .204)
    return [red for class_id in class_ids if class_id == 1]
|
a856d804b893bd9ba440833f178ef8b6c9743c1f
| 10,749
|
import pickle
import os
def try_read_cache(dbd_file, dbd_cache_path):
    """
    Attempt to load a cached parse of *dbd_file* from *dbd_cache_path*.

    The cache file stores a pickled (size, mtime) pair followed by the
    payload; the payload is returned only when both fields match the open
    file's current stat. Returns None when the cache is missing, stale,
    unreadable, or the path is None.
    """
    try:
        with open(dbd_cache_path, "rb") as cache:
            cached_size, cached_mtime = pickle.load(cache)
            info = os.fstat(dbd_file.fileno())
            if (info.st_size, info.st_mtime) == (cached_size, cached_mtime):
                return pickle.load(cache)
    except (TypeError, IOError):
        # path was None, or the cache file was absent / not readable
        return None
    except (EOFError, pickle.PickleError):
        # cache file was truncated or corrupted; treat as a miss
        pass
    return None
|
207e7568ee2c5eefc90e9d1f7100a636c0c33e42
| 10,752
|
def _map_args(repo, args):
"""
Maps a set of arguments to a predefined set of values. Currently only
__REPO__ is support and will be replaced with the repository name.
:param repo: The repo name used for mapping.
:type repo: str
:param args: An array of arguments to map.
:type args: list
:rtype: list
"""
arg_map = {'__REPO__': repo}
mapped_args = []
for arg in args:
mapped_args.append(arg_map.get(arg, arg))
return mapped_args
|
0e31510a764c3f6dca4726daa4e8716bdc7328db
| 10,753
|
def R_curv(deltaT_sub, r_min, radius, Q_drop):
    """ thermal resistance due to drop curvature
    Parameters
    ----------
    deltaT_sub: float
        temperature difference to the cooled wall in K
    r_min: float
        minimum droplet radius in m
    radius: float
        radius of drop in m
    Q_drop: float
        rate of heat flow through drop in W
    Returns
    ----------
    R_curv: float
        thermal resistance due drop curvature in K/W
    """
    curvature_subcooling = deltaT_sub * r_min / radius
    return curvature_subcooling / Q_drop
|
176520b43184e879bb25bcecc50e64e6dabaa6cc
| 10,754
|
def problem057():
    """
    Project Euler 57: square root convergents.

    The continued-fraction expansions of sqrt(2) are 3/2, 7/5, 17/12,
    41/29, ... Count how many of the first one thousand expansions have a
    numerator with more digits than the denominator (e.g. the eighth,
    1393/985, is the first such fraction).
    """
    count = 0
    numer, denom = 0, 1
    for _ in range(1000):
        # advance to the next approximation of sqrt(2) - 1
        numer, denom = denom, 2 * denom + numer
        # the full fraction is (numer + denom) / denom
        if len(str(numer + denom)) > len(str(denom)):
            count += 1
    return count
|
48e0d14be2893dd23e4b13238b56d9939541b526
| 10,755
|
import subprocess
def srun(cmd, check=False):
    """Execute *cmd* through the shell and return the CompletedProcess."""
    completed = subprocess.run(cmd, shell=True, check=check)
    return completed
|
b84827f50b5dec3f17e0d0396b9bdd684e16556f
| 10,756
|
def _decode_list(vals):
""" List decoder
"""
return [val.decode() if hasattr(val, 'decode') else val for val in vals]
|
852629abaa25e5bc4f2388b273a0676d3afb8337
| 10,757
|
def is_empty(elem):
    """Helper to check whether the passed element (e.g. a list) is empty.
    Args:
        elem (:obj:): Any structured data object (i.e. list).
    Returns:
        bool: True if the element is empty/falsy, False otherwise.
    """
    return not elem
|
e449e1f765744bbc21880881550c9e87c8e83bcd
| 10,758
|
from pathlib import Path
def get_toplevel_dirpath(path):
    """
    Provide the top level directory for the given path.

    The top level directory is the closest ancestor (or the directory
    itself) that contains a ``controls.json`` file. Returns None when no
    such directory exists or *path* is None.

    :param path: absolute or relative path to a file or directory.
    :returns: the absolute path to the top level directory, or None.
    """
    if path is None:
        return None
    resolved = Path(path).resolve()
    candidates = list(resolved.parents)
    if resolved.is_dir():
        candidates.insert(0, resolved)
    # skip the filesystem root (last parent)
    for candidate in candidates[:-1]:
        if (candidate / 'controls.json').is_file():
            return str(candidate)
    return None
|
324df1ceda2bf5813d65f86f6319dde8819f8fa3
| 10,759
|
def list_pad(l, pad_tok, max_length):
    """
    Truncate or right-pad *l* to exactly *max_length* entries.
    Args:
        l: the input sequence
        pad_tok: the token to pad with
        max_length: the target length
    Returns:
        a new list of length max_length
    """
    padding = [pad_tok] * (max_length - len(l))
    return list(l[:max_length]) + padding
|
b4d3c4875d7a1a411c0e6527ed9f7550e1a01cdb
| 10,760
|
def str_(s):
    """
    Return *s* decoded to str when it is bytes-like, otherwise return it
    unchanged (Python 2/3 compatibility shim).
    :param s: a str or bytes-like value
    :return: the str form of s, or s itself when it cannot be decoded
    """
    try:
        return s.decode()
    except (AttributeError, UnicodeDecodeError):
        # AttributeError: already a str (no .decode method);
        # UnicodeDecodeError: undecodable bytes pass through as-is.
        # The original bare `except:` also hid unrelated errors.
        return s
|
ee9c65334a201ad37db058c0e3c51a41f0650298
| 10,762
|
import pathlib
def _detect_home_location() -> pathlib.Path:
"""Detects the location of the root directory"""
# path/to/home/backend/core/config_loader.py
path_to_self = pathlib.Path(__file__).absolute()
# path/to/home/backend/core/
path_to_core_module = path_to_self.parent
# path/to/home/backend/
path_to_backend_module = path_to_core_module.parent
# path/to/home/
path_to_home = path_to_backend_module.parent
return path_to_home
|
c309656d5a56261fd96c86c179947981dc65dc58
| 10,763
|
def _to_int(hex_digit):
"""turn an hexadecimal digit into a proper integer"""
return int(hex_digit, 16)
|
2d9f0a3f5e754fc0fc932349a9a0bcb5f0066cce
| 10,765
|
from collections import defaultdict
def empty_dict(old_dict):
    """ Return a defaultdict(list) of empty lists with exactly the same keys as old_dict
    **Parameters**
        :old_dict:
            Dictionary of lists (identified by the key).
    :Author: Sirko Straube
    :Created: 2010/11/09
    """
    new_dict = defaultdict(list)
    # Populate each key with a fresh empty list explicitly; the original
    # used a throwaway list comprehension purely for this side effect.
    for key in old_dict:
        new_dict[key] = []
    return new_dict
|
fd63ee3e4600bdf8467ece9ea99f5f8e20399802
| 10,766
|
def delete_volume(module, volume):
    """ Delete Volume. Volume could be a snapshot.

    The deletion is skipped in check mode, but a change is still reported
    so dry runs mirror a real run.

    :param module: module object; only its ``check_mode`` flag is read.
    :param volume: object exposing a ``delete()`` method.
    :return: True (the volume is always considered changed).
    """
    if not module.check_mode:
        volume.delete()
    # The original set a local `changed = True` that was never used.
    return True
|
f463b4cd1325c1a67ff26009978e653e1354ebc8
| 10,770
|
def u4u1(u4):
    """
    pixel pitch -> mm
    u4 = (pitch, pixel); returns pitch * pixel rounded to 2 places
    """
    pitch, pixels = u4
    return round(pitch * pixels, 2)
|
d740975308f8c0723bd4a474cae883e8d6a0290a
| 10,771
|
def normalize_numpy(img_256_arr):
    """
    Scale an image array with values in [0, 255] down to [0, 1].
    Args:
        img_256_arr: a NumPy array (intended to be 2D or 3D) whose values
            lie in the range [0, 255], representing an image
    Returns:
        A NumPy array with the same dimensions, values divided by 255
    """
    scale = 255.0
    return img_256_arr / scale
|
50fa2854c53a975487d501e6a6703b63e82def86
| 10,772
|
def keep_only_this_derivation(name_of_set_to_make, connections_list_of_dics):
    """Filter the connection dicts down to those whose "derivation name"
    equals *name_of_set_to_make*."""
    return [entry for entry in connections_list_of_dics
            if entry["derivation name"] == name_of_set_to_make]
|
4ec8a010055d6f3740c489184f8b123bcd8780ee
| 10,773
|
def make_task(func):
    """Mark the decorated function as a doit task-creator and return it."""
    setattr(func, 'create_doit_tasks', func)
    return func
|
4a7d23765aa47f4c87723efc080913ff0c126205
| 10,774
|
import os
import requests
def clapack_header(clapack_h = "clapack.h"):
    """Return the clapack header as string (is downloaded if not found)

    NOTE(review): the download path returns ``bytes`` (r.content) while the
    cached path returns ``str`` — kept as-is for compatibility, but callers
    should be aware of the mismatch.
    """
    clapack_url = "http://www.netlib.org/clapack/clapack.h"
    if not os.path.exists(clapack_h):
        r = requests.get(clapack_url)
        assert r.status_code == 200
        # cache the header on disk so later calls avoid the network
        with open(clapack_h, "wb") as io:
            io.write(r.content)
        return r.content
    else:
        # use a context manager so the handle is closed promptly
        # (the original leaked the file object returned by open())
        with open(clapack_h) as io:
            return io.read()
|
0a86f5b805430b1434ba28cec423c5d54aa89094
| 10,775
|
def ifelse(condition, then_expression, else_expression):
    """Controls if else logic flow inside a computational graph """
    return then_expression if condition else else_expression
|
69190f0f9c51cd97db97d64203f51ae66370e3ff
| 10,776
|
def entradas():
    """Prompt the user for a contact's details and return them.

    Returns:
        tuple: (name, contact dict). The name is also stored in the dict
        under the 'nome' key, so it is returned twice for convenience.
    """
    contato = {}
    # Prompts are user-facing strings and intentionally kept in Portuguese.
    nome = input("qual o nome do contato: ")
    contato['nome'] = nome
    contato["numero"] = input("qual o telefone/celular: ")
    contato["endereco"] = input("qual o endereço: ")
    contato["email"] = input("qual o email dele: ")
    return nome, contato
|
dbcb82d91facf4c324db6cff07d2080ca3330c6a
| 10,777
|
from pathlib import Path
def read_image_scipy2(input_filename: Path) -> np.array:  # type: ignore
    """
    Read an image file with imageio and return a numpy array.
    :param input_filename: Source image file path.
    :return: numpy float64 array of shape (H, W) or (H, W, 3).
    """
    # np.float was removed in NumPy 1.24; it was always an alias for the
    # builtin float (i.e. float64), so np.float64 is the exact replacement.
    numpy_array = imageio.imread(input_filename).astype(np.float64)  # type: ignore
    return numpy_array
|
54d3aa7d8a3043e5a79e2668be1e971b543669d9
| 10,780
|
import os
def parse_lang(filename):
    """
    Map a filename extension to a language name.
    Only python and golang are supported; anything else yields None.
    """
    _, extension = os.path.splitext(filename)
    return {".py": "python", ".go": "go"}.get(extension)
|
a923e048abbe5fff031ae066748e3d2fd094ddb4
| 10,781
|
from typing import List
from typing import Tuple
def sentence_from_tokens(
    tokens: List[str], pos_list: List[str]
) -> Tuple[str, List[Tuple[int, int]]]:
    """
    Reassemble a plausible sentence text from tokens, since the original
    whitespace information is unavailable. Heuristics suppress the space
    before sentence-final/comma punctuation and around quotes, brackets
    and slashes.

    Returns the text and one (start, end) character span per token.
    """
    assert len(tokens) == len(pos_list)
    spans = []
    text = ""
    last = len(tokens) - 1
    for idx, (token, pos) in enumerate(zip(tokens, pos_list)):
        nxt_token = tokens[idx + 1] if idx < last else None
        nxt_pos = pos_list[idx + 1] if idx < last else None
        spans.append((len(text), len(text) + len(token)))
        no_space = (
            nxt_pos in ("$.", "$,")
            or token in ("``", "(", "/")
            or nxt_token in ("''", ")", "/")
            or nxt_token is None
        )
        text += token if no_space else token + " "
    return text, spans
|
df212e0324efb4135f7674ea7d598d73fea7d2cb
| 10,782
|
import os
import glob
def FindTestFiles():
    """Return a list of all test files (*_test.py) under ./plaso."""
    module_dir = os.path.join('.', 'plaso')
    matches = []
    for directory, _, _ in os.walk(module_dir):
        candidates = glob.iglob(os.path.join(directory, '*_test.py'))
        matches.extend(c for c in candidates if os.path.isfile(c))
    return matches
|
f13ebfffa6c51cbaa90060356af82aeaba146e34
| 10,783
|
def check_hdr(hdr):
    """
    Run basic quality checks on a NIRPS image header.

    :param hdr: header mapping to check. Currently only 'OBJECT' is
        required; more checks may be added over time.
    :return: empty string when all required keys are present, otherwise a
        space-terminated list of 'missing:<KEY>' entries
    """
    required_keys = ['OBJECT']
    problems = ''
    for key in required_keys:
        if key in hdr:
            print('Key {0} is present'.format(key))
        else:
            problems += 'missing:' + key + ' '
            print('Key {0} is missing'.format(key))
    # the header is considered fine only when problems is empty
    return problems
|
9626f472d5194217a7d6f794e6192aea7112154b
| 10,784
|
def process_results(results):
    """
    Show up to the first few results and let the user pick one to stream.

    :param results: mapping of result label -> result data (from get_items)
    :return: [label, data] for the chosen entry; [] when there are no
        results; None after an invalid choice (a retry message is printed).
    """
    if len(results) < 1:
        return []
    temp_data = {}
    count = 1
    print("Here are the top {} results for your query, which one would you like to stream".format(
        "5" if len(results) > 5 else str(len(results))))
    for label in results.keys():
        temp_data[count] = label
        print(count, label)
        count = count + 1
        if count == 5:
            print("Enter your choice:")
            break
    try:
        choice = int(input())
        if choice < 1 or choice > 4:
            # BUG FIX: the original executed `a = 0 / 4`, which is simply
            # 0.0 and raises nothing, so out-of-range choices silently
            # returned None without the retry message. Raise explicitly so
            # the except branch below handles them.
            raise ValueError("choice out of range")
        print("\n\nGive me 2-3 minutes to generate a video buffer....")
        return [temp_data[choice], results[temp_data[choice]]]
    except (ValueError, KeyError):
        # ValueError: non-numeric or out-of-range input;
        # KeyError: a number with no corresponding listed result.
        print("Try Again.. Enter a valid choice")
|
23d3087ddf39ab6fa94072fabaa2bcde68add2a9
| 10,786
|
def format_secret(secret):
    """
    Format secret to compatible decrypt string
    Args:
        secret (string): KMS secret hash
    Returns:
        formatted ef resolvable KMS decrypt string
    Raises:
        None
    """
    return "{{{{aws:kms:decrypt,{0}}}}}".format(secret)
|
274a0686db07621d657ebc29eda21ec18c1d0afa
| 10,787
|
import torch
def le(a, b, square=False, **kwargs):
    """
    Loss encoding of the constraint "a <= b": zero where satisfied,
    positive otherwise. With square=False the penalty is max(a - b, 0);
    with square=True it is sign(a - b) * (a - b)^2 clamped at zero.
    """
    diff = a - b
    if not square:
        return torch.clamp(diff, min=0)
    return torch.clamp(diff.sign() * diff * diff, min=0)
|
02c8a2f5255255f754be6335a7ed8c55a1910192
| 10,788
|
from typing import Dict
from typing import Any
import os
import logging
def get_key_value(d: Dict, key, default=None) -> Any:
    """Return value for the key either from the dict or environmental variable or default.
    Raises error if value is not found and there is no default."""
    if key in d:
        value = d[key]
        logging.warning('key "{}" is already in dict.'.format(key))
    elif key in os.environ:
        value = os.environ[key]
        logging.warning('got key "{}" from environment.'.format(key))
    elif default is not None:
        value = default
    else:
        raise NameError(
            'key "{}" must be either in a settings json file or environmental variable'.format(key))
    if isinstance(value, str) and not value:
        logging.warning('Zero length string for key "{}" value.'.format(key))
    return value
|
57b9a859c8934b16dca5529ec53248f1b44fcec5
| 10,790
|
import sys
def batch_end(segments, bstart, batch_max):
    """ Determine the batch end index that keeps the summed segment
    length under the given max. Segments are (chrom, start, end) tuples. """
    index = bstart
    total = 0
    while index < len(segments) and total < batch_max:
        _, seg_start, seg_end = segments[index]
        total += seg_end - seg_start
        index += 1
    if bstart >= index or index > len(segments):
        # only reachable with malformed inputs (e.g. bstart past the end)
        print("I've made a terrible mistake. On batching segments", file=sys.stderr)
        exit(1)
    return index
|
437729d43c3d569861feb25e0366f84d99f1a943
| 10,791
|
import random
def get_fixed_samples(a_list, num=20000):
    """
    Return a deterministic, shuffled sample of exactly *num* items drawn
    with repetition from a_list; a non-positive num returns a_list as-is.
    """
    if num <= 0:
        return a_list
    repeats = num // len(a_list) + 1
    pool = a_list * repeats
    random.seed(47)  # fixed seed keeps the sample reproducible
    random.shuffle(pool)
    return pool[:num]
|
fe7c22329001ea0cde155a6077c28aeaffeb0a65
| 10,792
|
import random
from functools import reduce
def diceroll (dice, sides):
    """
    Simulate rolling `dice` dice with `sides` sides each and return the
    total; e.g. diceroll(5, 6) is 5 rolls of a 6-sided die.
    (Replaces the Python Cookbook's non-standard `reduce` trick with a
    plain sum; randrange is consumed the same number of times.)
    """
    # randrange(sides) yields 0..sides-1, so add 1 per die for 1..sides.
    return sum(random.randrange(sides) for _ in range(dice)) + dice
|
ad51a53a9df1df2c64e7ad7f361365ff8df219c6
| 10,794
|
import functools
import types
def memoize_method(func):
    """
    Decorator caching the first result of a method on its instance.

    The value is stored as ``_memoize_<name>`` on the instance and returned
    for every later call, regardless of arguments. Generator results are
    materialised into a list before caching.
    """
    attr = '_memoize_{}'.format(func.__name__)

    @functools.wraps(func)
    def wrapped(self, *args, **kwargs):
        if not hasattr(self, attr):
            value = func(self, *args, **kwargs)
            if isinstance(value, types.GeneratorType):
                # automatically resolve generators
                value = list(value)
            setattr(self, attr, value)
        return getattr(self, attr)
    return wrapped
|
f3a43d03e9b34c7a623452868ff2d33fed299dd9
| 10,795
|
from pathlib import Path
import io
from unittest.mock import patch
async def test_upload_view(hass, hass_client, temp_dir, hass_admin_user):
    """Allow uploading media.

    Exercises /api/media_source/local_source/upload for the happy path and
    a series of rejection cases: bad media source ids, malformed POST data,
    a disallowed content type, a path-traversal filename, and a caller
    without admin rights.
    """
    img = (Path(__file__).parent.parent / "image/logo.png").read_bytes()
    # Fresh BytesIO per request: each upload consumes the stream, and the
    # .name attribute is what the form layer uses as the filename.
    def get_file(name):
        pic = io.BytesIO(img)
        pic.name = name
        return pic
    client = await hass_client()
    # Test normal upload
    res = await client.post(
        "/api/media_source/local_source/upload",
        data={
            "media_content_id": "media-source://media_source/test_dir/.",
            "file": get_file("logo.png"),
        },
    )
    assert res.status == 200
    assert (Path(temp_dir) / "logo.png").is_file()
    # Test with bad media source ID
    for bad_id in (
        # Subdir doesn't exist
        "media-source://media_source/test_dir/some-other-dir",
        # Main dir doesn't exist
        "media-source://media_source/test_dir2",
        # Location is invalid
        "media-source://media_source/test_dir/..",
        # Domain != media_source
        "media-source://nest/test_dir/.",
        # Completely something else
        "http://bla",
    ):
        res = await client.post(
            "/api/media_source/local_source/upload",
            data={
                "media_content_id": bad_id,
                "file": get_file("bad-source-id.png"),
            },
        )
        assert res.status == 400
        assert not (Path(temp_dir) / "bad-source-id.png").is_file()
    # Test invalid POST data
    res = await client.post(
        "/api/media_source/local_source/upload",
        data={
            "media_content_id": "media-source://media_source/test_dir/.",
            "file": get_file("invalid-data.png"),
            "incorrect": "format",
        },
    )
    assert res.status == 400
    assert not (Path(temp_dir) / "invalid-data.png").is_file()
    # Test invalid content type
    text_file = io.BytesIO(b"Hello world")
    text_file.name = "hello.txt"
    res = await client.post(
        "/api/media_source/local_source/upload",
        data={
            "media_content_id": "media-source://media_source/test_dir/.",
            "file": text_file,
        },
    )
    assert res.status == 400
    assert not (Path(temp_dir) / "hello.txt").is_file()
    # Test invalid filename
    # aiohttp sanitises the filename itself, so patch its guess to force a
    # traversal attempt through to the server-side validation.
    with patch(
        "aiohttp.formdata.guess_filename", return_value="../invalid-filename.png"
    ):
        res = await client.post(
            "/api/media_source/local_source/upload",
            data={
                "media_content_id": "media-source://media_source/test_dir/.",
                "file": get_file("../invalid-filename.png"),
            },
        )
    assert res.status == 400
    assert not (Path(temp_dir) / "../invalid-filename.png").is_file()
    # Remove admin access
    hass_admin_user.groups = []
    res = await client.post(
        "/api/media_source/local_source/upload",
        data={
            "media_content_id": "media-source://media_source/test_dir/.",
            "file": get_file("no-admin-test.png"),
        },
    )
    assert res.status == 401
    assert not (Path(temp_dir) / "no-admin-test.png").is_file()
|
6bdc05a17a44c4ae016ae8c8fc40bfceb2072c7f
| 10,799
|
import argparse
import getpass
def get_args():
    """ Get arguments from CLI for creating a vSphere namespace and
    assigning user permissions. Prompts for the VC password when -p is not
    supplied.

    :return: parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(
        description='Arguments for namespace creation')
    parser.add_argument('-s', '--host',
                        required=True,
                        action='store',
                        help='VC IP or FQDN')
    parser.add_argument('-u', '--user',
                        required=True,
                        action='store',
                        help='VC username')
    parser.add_argument('-p', '--password',
                        required=False,
                        action='store',
                        help='VC password')
    parser.add_argument('-cl', '--clustername',
                        required=True,
                        action='store',
                        help='cluster name')
    parser.add_argument('-ns', '--namespacename',
                        required=True,
                        default="my-ns",
                        action='store',
                        help='Pass DNS complaint namespace name')
    parser.add_argument('-nd', '--description',
                        required=False,
                        default="My first namespace",
                        action='store',
                        help='Any description of your choice')
    parser.add_argument('-role', '--nsrole',
                        required=True,
                        default="EDIT",
                        action='store',
                        help='Role for the Namespace user EDIT or VIEW')
    parser.add_argument('-st', '--subjecttype',
                        required=True,
                        default="USER",
                        action='store',
                        help='Subject type i.e. USER or GROUP')
    parser.add_argument('-subject', '--nsuser',
                        required=True,
                        default="Administrator",
                        action='store',
                        help='Namespace for which we need to assign the permissions with')
    # BUG FIX: help text previously said "Master management network subnet
    # mask", copy-pasted from an unrelated option.
    parser.add_argument('-domain', '--nsdomain',
                        required=True,
                        default="vsphere.local",
                        action='store',
                        help='Domain of the namespace user i.e. vsphere.local')
    parser.add_argument('-sp', '--storagepolicy',
                        required=True,
                        action='store',
                        help='Storage policy name for namespace')
    parser.add_argument('-slimit', '--storagelimit',
                        required=False,
                        default=None,
                        action='store',
                        help='Pass the storage limit in mebibytes i.e. 10240 for 10 GB')
    args = parser.parse_args()
    if not args.password:
        args.password = getpass.getpass(
            prompt='Enter VC password:')
    return args
|
82e536c056eaa558a1df1ea61d4f36a8a29384de
| 10,800
|
def process_stn_activation(df, side):
    """
    Calculate the STN activation percentage: the share of the STN volume
    covered by the VTA.
    :param df: DataFrame with per-patient STN volume and active STN volume
    :param side: side of the brain 'L' or 'R'
    :return: Series of STN activation percentages, rows ordered by Patient
    """
    ordered = df.sort_values(['Patient'])
    total_volume = ordered['STN vol (' + side + ') [mm3]']
    active_volume = ordered['VTA inside STN (' + side + ') [mm3]']
    return (active_volume / total_volume) * 100
|
6ed2a21e547a0ed9f35a77fed02e6f4fbf59cb6a
| 10,801
|
def combine_dictionaries(dict1, dict2):
    """Merge dict2 into dict1 in place: lists under shared keys are
    concatenated, new keys are added. Returns dict1.
    WARNING: this only works if the dictionaries have values that are lists"""
    merged = dict1
    for key, value in dict2.items():
        if key not in merged:
            merged[key] = value
        else:
            assert(isinstance(value, list))
            assert(isinstance(merged[key], list))
            merged[key] += value
    return merged
|
bc291bd8b31870ee8d04f4cb13c94c88bb97fea9
| 10,802
|
def greatest_common_divisor(larger_num, smaller_num):
    """Euclid's algorithm for the Greatest Common Divisor of two
    non-negative integers.
    pre: larger_num & smaller_num are both non-negative integers, and larger_num > smaller_num
    post: returns the greatest common divisor of larger_num & smaller_num
    """
    while smaller_num:
        larger_num, smaller_num = smaller_num, larger_num % smaller_num
    return larger_num
|
70db69b222f1a4a0d395e9bb1ff87ef031dbbbd6
| 10,803
|
import argparse
def get_args():
    """
    Get arguments from command line.

    Returns:
        argparse.Namespace with image_path, checkpoint, topk,
        category_names and gpu attributes.
    """
    def _str2bool(value):
        # BUG FIX: argparse `type=bool` returns True for ANY non-empty
        # string, so `--gpu False` used to enable the GPU. Parse the usual
        # textual spellings explicitly instead.
        return str(value).strip().lower() in ("1", "true", "t", "yes", "y")

    parser = argparse.ArgumentParser()
    parser.add_argument("image_path", type=str, help="path to image in which to predict class label")
    parser.add_argument("checkpoint", type=str, help="checkpoint in which trained model is contained")
    parser.add_argument("--topk", type=int, default=5, help="number of classes to predict")
    parser.add_argument("--category_names", type=str, default="cat_to_name.json",
                        help="file to convert label index to label names")
    parser.add_argument("--gpu", type=_str2bool, default=True,
                        help="use GPU or CPU to train model: True = GPU, False = CPU")
    return parser.parse_args()
|
ec468187194d8c2af0182c5464452c614842091a
| 10,804
|
def _urlescape(name):
"""Escape the given name for inclusion in a URL.
Escaping is done in the manner in which AutoDuck(?) seems to be doing
it.
"""
name = name.replace(' ', '_')\
.replace('(', '.28')\
.replace(')', '.29')
return name
|
ec94492778ccdefdde7d657f8fffd953915949ae
| 10,808
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.