content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
import importlib
def dynamic_import(import_path, alias=None):
    """Dynamically import a module attribute (usually a class).

    :param str import_path: either a key registered in ``alias`` or a
        'module_name:attr_name' path, e.g. 'deepspeech.models.u2:U2Model'
    :param dict alias: optional shortcut mapping of registered names to
        'module:attr' paths
    :return: the imported attribute (class, function, ...)
    :raises ValueError: if import_path is neither aliased nor ':'-qualified
    """
    # Bug fix: avoid a mutable default argument; treat None as "no aliases".
    if alias is None:
        alias = {}
    if import_path not in alias and ":" not in import_path:
        raise ValueError("import_path should be one of {} or "
                         'include ":", e.g. "deepspeech.models.u2:U2Model" : '
                         "{}".format(set(alias), import_path))
    if ":" not in import_path:
        # Resolve the registered shortcut to its real module:attr path.
        import_path = alias[import_path]
    module_name, objname = import_path.split(":")
    m = importlib.import_module(module_name)
    return getattr(m, objname)
|
f77ae38d6f82922614fbf4c9834e982eb2a9d612
| 318,164
|
from typing import Optional
def create_next_start_key(last_evaluated_key: Optional[dict]) -> Optional[dict]:
    """Build the 'next_start_key' value for this Lambda's response body.

    Mirrors DynamoDB's 'LastEvaluatedKey' but keeps only 'slot_name',
    dropping 'player_id' for security: if 'player_id' were echoed back as a
    user-supplied parameter, a malicious player could fetch another player's
    slot metadata.
    """
    if last_evaluated_key is None:
        return None
    slot_name = last_evaluated_key['slot_name']
    return {'slot_name': slot_name}
|
2d40f6467a74a955f99e59310eb597a9851917e7
| 407,234
|
def elgamal_keygen(params):
    """Generate an El Gamal key pair: (secret d, public gamma = d * g1)."""
    G, o, g1, hs, g2, e = params
    secret = o.random()
    public = secret * g1
    return (secret, public)
|
70394d9ec5e0ad70afc7892cf6bb6aa2469c7041
| 481,214
|
def align_decision_ref(id_human, title):
    """Align 'Beschluss'/'Entscheidung' terminology with the title.

    In German, decisions are referred to either as 'Beschluss' or
    'Entscheidung'; make ``id_human`` use the same term as ``title``.
    """
    title_uses_beschluss = 'Beschluss' in title
    if title_uses_beschluss:
        return id_human
    return id_human.replace('Beschluss ', 'Entscheidung ')
|
ac4f584b8e008576816d9a49dba58bc9c9a6dbc4
| 4,845
|
from typing import Dict
def get_fields(index_dict: Dict) -> Dict:
    """Flatten an Elasticsearch index mapping.

    :param index_dict: Elasticsearch index mapping
    :return: simplified dict mapping field names to their types; nested
        'properties' mappings are recursed into as sub-dicts.
    """
    simplified = {}
    for field, spec in index_dict.items():
        if "properties" in spec:
            # Nested object field: recurse into its sub-mapping.
            simplified[field] = get_fields(spec["properties"])
        elif "type" in spec:
            simplified[field] = spec["type"]
        else:
            # Preserve the original behavior of reporting unknown entries.
            print(f"unrecognised key: {field}")
    return simplified
|
7332fb47cb9876d0c874f2dd9b78213fe11ed0c1
| 453,482
|
def restrict_common_genes(X1, X2, genes1, genes2):
    """Restrict two expression matrices to their shared genes.

    ``genes1``/``genes2`` name the columns of ``X1``/``X2`` respectively.
    Both matrices are re-indexed to keep only columns for genes present in
    both lists, ordered by the sorted common gene names.

    Returns (X1_restricted, X2_restricted, common_genes).
    """
    common_genes = sorted(set(genes1) & set(genes2))
    # Map gene name -> column index for each matrix.
    index1 = {gene: col for col, gene in enumerate(genes1)}
    index2 = {gene: col for col, gene in enumerate(genes2)}
    cols1 = [index1[gene] for gene in common_genes]
    cols2 = [index2[gene] for gene in common_genes]
    return X1[:, cols1], X2[:, cols2], common_genes
|
b2b6cdf55d817313d713a0f9f7ead6b8ab54b6e3
| 585,512
|
def _get_host_string(config):
"""
Take appropriate elements from a config dictionary and convert them into
a string of format 'host:port'.
:param dict config: Application configuration dictionary, including ES config.
"""
host = config["es-configuration"]["es-host"]
port = config["es-configuration"]["es-port"]
return "%s:%d" % (host, port)
|
b91551b34382f49f89992c1d1fc918b82ba52ac5
| 80,270
|
def str_to_bool(user_input):
    """Convert a string (or bool) to a boolean value.

    Already-boolean inputs pass through unchanged; otherwise only '1' and
    'true' (case-insensitive) map to True, everything else to False.
    """
    if isinstance(user_input, bool):
        return user_input
    return user_input.lower() in ('1', 'true')
|
f491fb09e391c6766b8527165da21f91f9f96165
| 180,481
|
import json
def read_json(fname):
    """
    Read data from a JSON file.

    Parameters
    ----------
    fname : str
        Full path to the data-containing file

    Returns
    -------
    dict
    """
    # Bug fix: the file was opened with mode 'w', which truncates it and
    # makes json.load fail on an empty stream; open read-only instead.
    with open(fname, 'r') as ff:
        return json.load(ff)
|
755e56dbb69ef2e1c96e16be60e398194840a9c6
| 555,022
|
def clamp(val, at_least, at_most):
    """
    Clamp a value so it doesn't exceed the specified limits.

    If one of the edges is not needed, it should be passed as None
    (consider using at_most / at_least functions).

    :param val: Value to clamp.
    :param at_least: Minimum possible value, or None for no lower bound.
    :param at_most: Maximum possible value, or None for no upper bound.
    :return: The clamped value.
    :raises ValueError: if both bounds are given and at_least > at_most.
    """
    # Bug fix: the original compared the bounds before checking for None,
    # so passing None for either bound raised a TypeError even though the
    # docstring explicitly allows None.
    if at_least is not None and at_most is not None and at_least > at_most:
        raise ValueError("Min value cannot be higher than max value.")
    if at_most is not None:
        val = min(at_most, val)
    if at_least is not None:
        val = max(at_least, val)
    return val
|
1b1857c778ff0b34495abba9acc45acdf15203dd
| 456,265
|
def get_cells(worksheet, get_range: str):
    """
    Get cells from a sheet by range.

    params
    ------
    worksheet: openpyxl.WorkSheet
        loaded worksheet (anything supporting item access by range string).
    get_range: str
        Cell range to fetch, e.g. "A1:B3".

    return
    ------
    cells: Tuple[Cell]
        The cells tuple returned by the worksheet's item access.
    """
    return worksheet[get_range]
|
179c20419975daac5913b149efb60b4cc22537d9
| 30,825
|
import shlex
def parse_line(line_or_args):
    """Parse a command line (str) or argv-style list into (cmd, args)."""
    if isinstance(line_or_args, list):
        # Already tokenized: first element is the command, rest are args.
        return (line_or_args[0], line_or_args[1:])
    if " " in line_or_args:
        try:
            cmd, rest = line_or_args.split(maxsplit=1)
            return (cmd, shlex.split(rest))
        except ValueError:
            # Raised when the line is all whitespace (split yields < 2
            # parts) or shlex hits unbalanced quoting; fall through.
            pass
    return (line_or_args.strip(), None)
|
eaac1d87ed2f75162f19aa69977eccbbf28f3da2
| 540,603
|
def _GetSSHKeyListFromMetadataEntry(metadata_entry):
"""Returns a list of SSH keys (without whitespace) from a metadata entry."""
keys = []
for line in metadata_entry.split('\n'):
line_strip = line.strip()
if line_strip:
keys.append(line_strip)
return keys
|
45b51af6dde2d1fb918a05146fa5cca58222a890
| 491,590
|
def rank_genes_or_pathways(col_to_rank, DE_summary_stats, is_template):
    """
    Returns the input dataframe (`DE_summary_stats`) that has been modified such that
    genes are ranked by the selected statistic, `col_to_rank` (if the input is the
    template experiment) or the median of the selected statistic
    (if the input is the simulated experiments).
    The ordering of the ranking depends on the statistic selected.

    Arguments
    ---------
    col_to_rank: str
        DE statistic to use to rank genes
    DE_summary_stats: df
        dataframe containing gene ranking for either template or simulated experiments
    is_template: bool
        if the DE_summary_stats df is for the template experiment or simulated experiments
    """
    # Refactor: the original duplicated the same rank/sort code across three
    # branches; only two things actually vary:
    #   * which column is used (plain name for template, (name, "median")
    #     for the simulated multi-index summary), and
    #   * the direction (p-value-like stats: high rank = LOW value;
    #     logFC/t/Z-score-like stats: high rank = HIGH value).
    pvalue_stats = ["P.Value", "adj.P.Val", "pvalue", "padj", "FDR", "p.adjust"]
    rank_ascending = col_to_rank not in pvalue_stats
    key = col_to_rank if is_template else (col_to_rank, "median")
    DE_summary_stats["ranking"] = DE_summary_stats[key].rank(
        ascending=rank_ascending
    )
    DE_summary_stats = DE_summary_stats.sort_values(
        by=key, ascending=not rank_ascending
    )
    return DE_summary_stats
|
8b9be71b4dbb2cd3d0b1c67edb25488a94795a9a
| 253,608
|
def mean(num_list):
    """
    Compute the arithmetic mean of a list.

    Parameters
    -------------
    num_list : list
        List of numbers to calculate the mean of.

    Returns
    -------------
    mean : float
        Mean of the list of numbers.
    """
    total = sum(num_list)
    count = len(num_list)
    return total / count
|
0a88257a8ab2a493d8c153b5bddc95b66c836a12
| 547,034
|
def format_str(val, counter, values):
    """Format ``val`` (via ``str.format``) when it is a string; otherwise pass it through."""
    if not isinstance(val, str):
        return val
    return val.format(counter=counter, values=values)
|
fbb80d11705b1edbfd5cff127d56882716df0e61
| 547,636
|
def authenticated_user(client, account):
    """Create an authenticated user for a test.

    Sets a known email/password on ``account``, saves it, and logs it in on
    ``client``; returns the account for convenience.
    """
    account.email = 'test@gmail.com'
    account.set_password('my_password123')
    account.save()
    client.login(email='test@gmail.com', password='my_password123')
    return account
|
b1156f21ca94129fbf0dee8d0b0dbac834fbf59d
| 700,452
|
def drude(x, x0=4.59, gamma=0.90, **extras):
    """Drude profile for the 2175AA bump.

    :param x:
        Inverse wavelength (inverse microns) at which values for the drude
        profile are requested.
    :param x0:
        Center of the Drude profile (inverse microns).
    :param gamma:
        Width of the Drude profile (inverse microns).
    :returns k_lambda:
        The value of the Drude profile at x, normalized such that the peak is 1.
    """
    numerator = (x * gamma) ** 2
    denominator = (x ** 2 - x0 ** 2) ** 2 + (x * gamma) ** 2
    return numerator / denominator
|
99b16b9d0a4fd179aec67109ab9fd231887a524b
| 316,891
|
def get_class(kls):
    """Returns a class given a fully qualified class name"""
    module_path, _, _ = kls.rpartition('.')
    obj = __import__(module_path)
    # __import__ returns the top-level package; walk down the dotted path.
    for attr in kls.split('.')[1:]:
        obj = getattr(obj, attr)
    return obj
|
395002001d67d4e9820f133288fee483e2bfba9d
| 123,009
|
def calculate_ttc(
        state,
        risk_args,
        verbose=True):
    """
    This first method to calculate ttc (time-to-collision) is just brute force.
    Propagate the scene forward in steps of ``risk_args.step`` seconds until a
    collision is detected, or the maximum horizon ``risk_args.H`` is reached.
    Assume horizontal speed for the ego car is 0.
    Assume all accelerations are 0.
    Arguments:
        state:
            the StateHistory object that is used to get current positions / speeds for vehicles.
        risk_args:
            the parameters class for risk predictor, containing parameters like
            collision tolerances (collision_tolerance_x / _y), horizon H,
            step size, etc.
        verbose:
            Bool, whether or not to print logging messages.
    Returns:
        Time to collision (seconds), or None if no collision within the given H.
    """
    t = 0
    # Step time forward until the horizon is reached.
    while (t < risk_args.H):
        t += risk_args.step
        # Ego trajectory: straight ahead at constant speed, no lateral motion.
        ego_pos_x = 0  # for now, assume no lateral motion.
        ego_pos_y = state.get_ego_speed() * t
        for veh_id in state.state_histories.keys():
            # Most recent vehicle state for this vehicle.
            this_state = state.state_histories[veh_id][-1].quantities
            new_pos_x = None
            new_pos_y = None
            # Constant-velocity propagation per axis; an axis stays None when
            # its position/speed measurements are missing from the state.
            if "distance_x" in this_state and "speed_x" in this_state:
                new_pos_x = this_state["distance_x"] + this_state["speed_x"]*t
            if "distance_y" in this_state and "speed_y" in this_state:
                new_pos_y = this_state["distance_y"] + this_state["speed_y"]*t
            # A collision requires BOTH axes to be known and within tolerance.
            if (new_pos_x is not None and \
                    abs(new_pos_x - ego_pos_x) <= risk_args.collision_tolerance_x) \
                    and (new_pos_y is not None and \
                    abs(new_pos_y - ego_pos_y) <= risk_args.collision_tolerance_y):
                if verbose:
                    print("calculate_ttc")
                    print(new_pos_x, ego_pos_x)
                    print(new_pos_y, ego_pos_y)
                    print("veh id", veh_id, "colliding in", t, "seconds")
                # First collision found wins: return its time.
                return t
    return None
|
08c9768e9672aed9862eeaa8e0055ee799e7e72c
| 541,834
|
def quantile(x, q):
    """Return the element of x closest to the q'th quantile.

    Arguments:
        x: a 1-D numpy array (anything else returns None)
        q: the quantile of interest (a float)
    Returns:
        the element of x closest to the qth quantile, or None for
        non-1-D input.
    """
    if len(x.shape) != 1:
        return None
    ordered = sorted(x.tolist())
    position = int((len(ordered) - 1) * float(q))
    return ordered[position]
|
f9b8a702c215aa22e8ca19a1ab0ae47d122d7b16
| 417,572
|
def get_doi_from_request(request, method):
    """
    Extract the DOI from the request path.

    Args:
        request (HTTPRequest): The HTTP request
        method (str): path segment that precedes the DOI
    Return:
        str or None: the DOI, or None if the path has nothing after
        '<method>/'
    """
    parts = request.get_full_path().split(method + '/')
    # split always yields at least one element; a second one exists only
    # when '<method>/' actually occurred in the path.
    if len(parts) > 1:
        return parts[1]
    return None
|
46e848e472298f24e9de5c5234a188cf9e6f62a8
| 454,896
|
def remove_special_chars(text: str, char_set=0) -> str:
    """Removes special characters from a text.

    :param text: String to be cleaned.
    :param char_set: 0 -> remove all ASCII special chars except for '_' & 'space';
                     1 -> remove invalid chars from file names
    :return: Clean text, with leading/trailing '_' stripped.
    """
    # ASCII control characters (0-31 and DEL) are always replaced.
    command_chars = [chr(code) for code in tuple(range(0, 32)) + (127,)]
    special_chars = ([chr(code) for code in tuple(range(33, 48)) + tuple(range(58, 65))
                      + tuple(range(91, 95)) + (96,) + tuple(range(123, 127))],
                     ('\\', '/', ':', '*', '?', '"', '<', '>', '|'))
    # Idiom/perf: one C-level str.translate pass replaces the original
    # chain of per-character .replace() calls, and strip('_') replaces the
    # two while-loops trimming edge underscores.
    table = str.maketrans(
        {ch: '_' for ch in command_chars + list(special_chars[char_set])}
    )
    return text.translate(table).strip('_')
|
a8579526f3875cfe9bc071b1c16cd04ddf70414f
| 254,329
|
def setTeamQorStateUsage(err=''):
    """Return the Usage() text for this method, prefixed with ``err``."""
    usage_lines = (
        '%s\n\n' % err,
        ' This script set the status of a Team Branch QOR bin in Salesforce.\n',
        ' Use one of the forms below to meet your needs.\n',
        ' \n',
        ' Set the sf team\'s blast5_sf1 QOR bin to "SCM-QOR Testing":\n',
        ' teamqortest -s blast5 -n sf --tb blast5_sf1\n',
        ' \n',
        ' Set the sf team\'s blast5_sf1 QOR bin to "SCM-QOR Result":\n',
        ' teamqorresult -sblast5 -n sf --tb blast5_sf1\n',
        ' \n',
        ' Set the sf team\'s blast5_sf1 QOR bin back to "SCM-QOR Building":\n',
        ' teamqorbuild -sblast5 -n sf --tb blast5_sf1\n',
        ' \n',
    )
    return ''.join(usage_lines)
|
abf4b076010dd8beeb81d3e071d752a758d3799c
| 548,511
|
def irc_prefix(var):
    """
    Prefix a string with irc_ (lower-cased).

    :param var: Variable to prefix
    :return: Prefixed variable, or None when var is not a str
    """
    if not isinstance(var, str):
        return None
    return 'irc_%s' % var.lower()
|
13a22ef74844c939b14fb26078b6ab4c93948408
| 685,871
|
def string_to_list(string) -> list:
    """Convert comma separated text to a list of trimmed items."""
    if string is None or string == "":
        return []
    # Strip quotes and spaces around each comma-separated item.
    return [item.strip("'\" ") for item in string.split(",")]
|
a5f700fab9d4196d50f0795550f7068c9e6058f7
| 457,652
|
def colorbrewer(values, alpha=255):
    """
    Return a dict of RGBA colors for the unique values.

    Colors are adapted from Harrower, Mark, and Cynthia A. Brewer.
    "ColorBrewer. org: an online tool for selecting colour schemes for maps."
    The Cartographic Journal 40.1 (2003): 27-37.

    :param values: values
    :param alpha: color alpha
    :return: dict of colors for the unique values.
    """
    basecolors = [
        [31, 120, 180],
        [178, 223, 138],
        [51, 160, 44],
        [251, 154, 153],
        [227, 26, 28],
        [253, 191, 111],
        [255, 127, 0],
        [202, 178, 214],
        [106, 61, 154],
        [255, 255, 153],
        [177, 89, 40]
    ]
    palette = {}
    # Cycle through the base palette, appending the alpha channel.
    for i, value in enumerate(set(values)):
        palette[value] = basecolors[i % len(basecolors)] + [alpha]
    return palette
|
795f33165b9479953dd02fc6b9076fcdcf127b52
| 175,759
|
def load_e3d_par(fp: str, comment_chars=("#",)):
    """
    Loads an emod3d parameter file as a dictionary.

    As the original file does not have type data all values will be strings.
    Typing must be done manually. Crashes if duplicate keys are found.

    :param fp: The path to the parameter file
    :param comment_chars: Any single characters that denote the line as a
        comment if they are the first non whitespace character
    :return: The dictionary of key:value pairs, as found in the parameter file
    :raises KeyError: if a key occurs more than once in the file
    """
    vals = {}
    with open(fp) as e3d:
        for line in e3d:
            stripped = line.lstrip()
            # Bug fixes: blank lines used to raise IndexError on [0], and
            # comment lines only hit 'pass' (not 'continue'), so they were
            # still parsed as key=value entries below.
            if not stripped or stripped[0] in comment_chars:
                continue
            key, value = line.split("=")
            if key in vals:
                raise KeyError(
                    f"Key {key} is in the emod3d parameter file at least twice. Resolve this before re running."
                )
            vals[key] = value
    return vals
|
664d510f5fe05498c2c454d248a88195b7a99e26
| 533,513
|
def listfmt(lst: list) -> str:
    """
    Format a list as a str with 4 decimal places of accuracy.
    """
    inner = ", ".join(format(x, ".4f") for x in lst)
    return f"({inner})"
|
40ae1ec1885a5319964243917df434eae1c0f016
| 178,379
|
def generate_id(name, data_id: int) -> str:
    """
    Overview:
        Build a unique id for the next data item from a buffer name and a
        numeric id.
    Arguments:
        - data_id (:obj:`int`): Current unique id.
    Returns:
        - id (:obj:`str`): Id in format "BufferName_DataId".
    """
    return f"{name}_{data_id}"
|
118f4cb6b1ecff81756124f8ef29a40623e7de00
| 456,202
|
def get_cc_feature_configuration(feature_configuration):
    """Returns the C++ feature configuration in a Swift feature configuration.

    Args:
        feature_configuration: The Swift feature configuration, as returned from
            `swift_common.configure_features`.
    Returns:
        A C++ `FeatureConfiguration` value (see
        [`cc_common.configure_features`](https://docs.bazel.build/versions/master/skylark/lib/cc_common.html#configure_features)
        for more information).
    """
    cc_configuration = feature_configuration.cc_feature_configuration
    return cc_configuration
|
d1a34092f6825c2ea04ecc28b75faf3d521c4193
| 431,884
|
def kth_largest_element(nums, left, right, k):
    """
    Quickselect on nums[left:right+1]: partition around nums[right] and
    recurse until the element that lands at index k is found.

    Note: despite the name, k is a 0-based index into the fully sorted
    (ascending) array, so k=0 yields the smallest element.

    :param nums: given array (partitioned in place)
    :type nums: list[int]
    :param left: left position to begin
    :type left: int
    :param right: right position to end
    :type right: int
    :param k: target index in sorted order
    :type k: int
    :return: element at sorted index k
    :rtype: int
    """
    pivot = nums[right]
    boundary = left - 1
    # Lomuto partition: move everything <= pivot before the boundary.
    for cursor in range(left, right):
        if nums[cursor] <= pivot:
            boundary += 1
            nums[boundary], nums[cursor] = nums[cursor], nums[boundary]
    pivot_index = boundary + 1
    nums[pivot_index], nums[right] = nums[right], nums[pivot_index]
    if pivot_index == k:
        return nums[pivot_index]
    if pivot_index > k:
        return kth_largest_element(nums, left, pivot_index - 1, k)
    return kth_largest_element(nums, pivot_index + 1, right, k)
|
ecc852faf3fcf70f510baf5a193af2bf6aac8662
| 587,495
|
def linear_search(sequence, target):
    """Pure implementation of linear search algorithm in Python

    :param sequence: some sorted collection with comparable items
    :param target: item value to search
    :return: index of found item or None if item is not found

    Examples:
    >>> linear_search([0, 5, 7, 10, 15], 0)
    0
    >>> linear_search([0, 5, 7, 10, 15], 15)
    4
    >>> linear_search([0, 5, 7, 10, 15], 5)
    1
    >>> linear_search([0, 5, 7, 10, 15], 6)
    """
    position = 0
    for element in sequence:
        if element == target:
            return position
        position += 1
    return None
|
6b81544bb808ef715b7e3b28af2ba5eaae6b6b61
| 447,437
|
def dependence_label(dep):
    """Return a string representing dep, a (reason, kind) pair."""
    reason, kind = dep
    if kind == u'trg':
        return "%s" % (kind, )
    if kind != u'unknown':
        return "%s_%s" % (reason, kind)
    return "%s" % reason
|
02d4c48691f3ba9c966c402eb4cf540aa8292d96
| 372,838
|
from bs4 import BeautifulSoup
def get_links(file):
    """Get product links from a saved webpage snapshot."""
    with open(file, 'r', encoding='utf-8') as snapshot:
        soup = BeautifulSoup(snapshot, 'html5lib')
    anchors = soup.find_all("a", {"class": "proditem__link"})
    return [anchor['href'] for anchor in anchors]
|
722c554fe43a695ae1149a7925269079787e80b0
| 446,334
|
def _pprint_strs(strs, max_chars=80, delimiter=', ', suffix='...',):
"""Pretty-print an iterable of strings, truncating if necessary."""
# Adapted from http://stackoverflow.com/a/250373
joined_str = delimiter.join(repr(s) for s in strs)
if len(joined_str) > max_chars:
truncated = joined_str[:max_chars + 1].split(delimiter)[0:-1]
joined_str = delimiter.join(truncated)
if joined_str:
joined_str += delimiter
joined_str += suffix
return joined_str
|
8f1d0566e262dcc0e9c66f6a7c27178013b8437a
| 235,133
|
import json
def get(last_state_file_location):
    """
    Read the last_state.json file and return its contents as a dict.

    :param last_state_file_location: path to the JSON state file
    :return: parsed JSON content as a dict
    """
    # Idiom: parse directly from the file handle with json.load; the
    # original's explicit close() inside the 'with' block was redundant.
    with open(last_state_file_location, 'r') as state_file:
        return json.load(state_file)
|
651d4859298c6e3da6257d955ef49c24522453bc
| 604,868
|
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
args += list(params)
return args
|
d8b866526caec024cf0e2f8819df562d55abf71e
| 175,642
|
def cleanup_ocr_text(txt):
    """Do some basic cleanup to make OCR text better.

    Err on the side of safety. Don't make fixes that could cause other issues.

    :param txt: The txt output from the OCR engine.
    :return: Txt output, cleaned up.
    """
    for bad, good in ((u"Fi|ed", u"Filed"), (u" Il ", u" II ")):
        txt = txt.replace(bad, good)
    return txt
|
058dd6a951dcf8c302a1ae6b01ed103987c67015
| 578,085
|
def get_item(where, key, defaultvalue=None):
    """
    Look up a dotted key path in nested dicts.

    :param where: dict to search
    :param key: multiple keywords combined with a period
    :param defaultvalue: returned if the key path is not found
    :return: either the defaultvalue or the value found
    """
    current = where
    for part in key.split("."):
        if part not in current:
            return defaultvalue
        current = current[part]
    return current
|
f322236b2a159524eb757e78e05592c9716ccaf8
| 435,734
|
def sgn(x):
    """Return the sign of x (note: sgn(0) == 1)."""
    if x < 0:
        return -1
    return 1
|
7b1079578f26051d5e8b2ca1bfb0f05c3878a305
| 548,827
|
from datetime import datetime
def datetime_string_to_datetime(string: str):
    """
    Convert a datetime formatted string to a datetime.

    :param string: str "%Y-%m-%d %H:%M:%S"
    :return: datetime
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    return datetime.strptime(string, fmt)
|
4ae5335164473d058207189ab0bacd3f01f7251c
| 529,936
|
def get_form_model(form):
    """Returns the model of a modelform."""
    meta = form._meta
    return meta.model
|
1fb0fd3dffd5080a546d4998da46f1ee20876dfa
| 407,946
|
import inspect
def get_caller_frame(i=1):
    """Get the caller's frame (utilizing the inspect module).

    You can adjust `i` to find the i-th caller, default is 1.
    """
    # We can't use inspect.stack()[1 + i][1] for the file name because ST sets
    # that to a different value when inside a zip archive.
    stack = inspect.stack()
    return stack[i + 1][0]
|
9eccddda5b6ef30956dc618c5e6a4273ca0c9ed3
| 447,582
|
import math
def absolute_error(y, yhat):
    """Returns the maximal absolute error between y and yhat.

    :param y: true function values
    :param yhat: predicted function values

    Lower is better.

    >>> absolute_error([0,1,2,3], [0,0,1,1])
    2.0
    """
    deviations = (math.fabs(a - b) for a, b in zip(y, yhat))
    return float(max(deviations))
|
c1262e042d3895a9ba06c213b27a1d5cb23c96fb
| 18,974
|
import csv
def create_dictionary(filename):
    """
    Creates a dict that contains patientid+month as key and diagnosis as value.

    For every CSV row, three keys are stored: the visit month itself plus the
    month before and the month after it (year boundaries handled), so lookups
    tolerate one month of date skew between data sources.

    Parameters
    ----------
    filename: str, path to csv file

    Returns
    -------
    dictionary with all patient data; keys are '<patient_id><YYYYMM>'
    strings, values are 'CN', 'MCI' or 'AD' labels.
    """
    dictionary = {}
    with open(filename) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            phase = row[0]
            key = row[3]  # patient id -- assumed column layout; TODO confirm against the CSV schema
            # The diagnosis column differs per ADNI study phase.
            if phase == 'ADNI1':
                dx = row[11]
            elif phase == 'ADNIGO' or phase == 'ADNI2':
                dx = row[10]
            else:
                dx = row[-2]
            # Map the numeric diagnosis codes onto the three class labels.
            if dx == '1' or dx == '7' or dx == '9':
                dx = 'CN'
            elif dx == '2' or dx == '4' or dx == '8':
                dx = 'MCI'
            elif dx == '3' or dx == '5' or dx == '6':
                dx = 'AD'
            date = row[7]
            # Collapse 'YYYY-MM-...' into 'YYYYMM' and append it to the key.
            date = date.split('-')[0] + date.split('-')[1]
            key = key + date
            # date2 becomes the following month, date3 the preceding month,
            # wrapping the year at the December/January boundary.
            date2 = int(date[-2:])
            date3 = date2 - 1
            date2 = (date2 + 1) % 13
            year = int(date[:4])
            year2 = year
            if date2 == 0:
                # Month 12 + 1 wrapped to 0 -> January of the next year.
                date2 = '01'
                year += 1
            elif date2 < 10:
                date2 = '0' + str(date2)
            else:
                date2 = str(date2)
            if date3 == 0:
                # Month 1 - 1 -> December of the previous year.
                date3 = '12'
                year2 -= 1
            elif date3 < 10:
                date3 = '0' + str(date3)
            else:
                date3 = str(date3)
            date2 = str(year) + date2
            date3 = str(year2) + date3
            # key[:10] is the patient-id prefix; swap in the shifted months.
            key2 = key[:10] + date2
            key3 = key[:10] + date3
            dictionary[key] = dx
            dictionary[key2] = dx
            dictionary[key3] = dx
    # Manual corrections for known problem records.
    dictionary['051_S_1123201202'] = 'MCI'
    dictionary['051_S_1072201202'] = 'MCI'
    dictionary['041_S_4014201107'] = 'CN'
    return dictionary
|
15b7c946f8bddf0b1ca70dfeb9eeeb6b35495acd
| 253,670
|
import random
import string
def random_string(length):
    """
    Generate a cryptographically secure random string of uppercase letters
    and digits with the given length, using the system random source.
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
|
3e277292e60bfa1a9f77280048450eb72ee2afdd
| 181,352
|
import math
def distance(x1: float, y1: float, x2: float, y2: float):
    """
    Return distance between two 2D points.
    """
    delta_x = x2 - x1
    delta_y = y2 - y1
    return math.sqrt(delta_x * delta_x + delta_y * delta_y)
|
a956929ed37b224bc178d4755129f4b6d469a167
| 628,641
|
import re
def is_fraction(s):
    """
    Determine if the input string appears to represent a fraction.
    This does not include mixed numbers such as 1 1/3

    :param s: A string value to check if it is formatted as a fraction.
    """
    pattern = re.compile(r'^-?\d+/\d+$')
    return pattern.match(s) is not None
|
10334c66120ad94204dba175b8659c2165031167
| 441,671
|
def fitting_function(x, a, b):
    """
    Power-law fitting function for the v_out vs SFR surface density data.

    Parameters
    ----------
    x : (vector)
        the SFR surface density
    a, b : (int)
        constants to be fit

    Returns
    -------
    y : (vector)
        the outflow velocity
    """
    return a * x ** b
|
748a04e8cee7fecaeeefb81631a509abd65fc331
| 558,177
|
def merge_dicts(source, destination):
    """
    Deeply merge ``source`` into ``destination`` and return it.

    Nested dicts are merged recursively, lists are concatenated, and
    scalar values are overwritten.
    NOTE: source has precedence over duplicated keys.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            # Merge into the existing sub-dict, creating one if missing.
            merge_dicts(value, destination.setdefault(key, {}))
        elif isinstance(value, list) and key in destination:
            destination[key].extend(value)
        else:
            destination[key] = value
    return destination
|
ea640cd9e6b43bf373b40085846b7c842d5e2085
| 525,583
|
def replace_value(value, arguments, parameters):
    """Resolve a possible template-parameter reference.

    Values of the form '$[[name]]' are replaced by the matching entry in
    ``arguments``, falling back to the declared parameter's default value;
    any other value is returned unchanged.

    Parameters
    ----------
    value: string
        String value in the workflow specification for a REANA template
    arguments: dict
        Dictionary that associates template parameter identifiers with
        argument values
    parameters: dict(reana_template.parameter.TemplateParameter)
        Dictionary of parameter declarations for a REANA template

    Returns
    -------
    string
    """
    is_reference = value.startswith('$[[') and value.endswith(']]')
    if not is_reference:
        return value
    # Extract the variable name between the '$[[' and ']]' markers.
    name = value[3:-2]
    if name in arguments:
        return arguments[name]
    return parameters[name].default_value
|
d83031e484af5f0cf7d0670d11a08c8459c9dd33
| 237,651
|
def arnonA_mono_to_string(mono, latex=False, p=2):
    """
    String representation of an element of Arnon's A basis.

    This is used by the _repr_ and _latex_ methods.

    INPUT:
    - ``mono`` - tuple of pairs of non-negative integers
      (m,k) with `m >= k`
    - ``latex`` - boolean (optional, default False), if true, output
      LaTeX string (the output happens to coincide with the plain form)

    OUTPUT: ``string`` - concatenation of strings of the form
    ``X^{m}_{k}`` for each pair (m,k); the empty tuple gives '1'.

    EXAMPLES::
        sage: from sage.algebras.steenrod.steenrod_algebra_misc import arnonA_mono_to_string
        sage: arnonA_mono_to_string(((1,2),(3,0)))
        'X^{1}_{2} X^{3}_{0}'
        sage: arnonA_mono_to_string(())
        '1'
    """
    if not mono:
        # The empty tuple represents the unit element.
        return "1"
    factors = ["X^{" + str(m) + "}_{" + str(k) + "}" for (m, k) in mono]
    return " ".join(factors)
|
b3a215344f690fef8ada066fe2c5f45b5873fbcd
| 491,799
|
import ast
def _convert_str_to_tuple(string):
"""Function to convert a Python `str` object to a `tuple`.
Args:
string: The `str` to be converted.
Returns:
A `tuple` version of the string.
Raises:
ValueError: If the string is not a well formed `tuple`.
"""
# literal_eval converts strings to int, tuple, list, float and dict,
# booleans and None. It can also handle nested tuples.
# It does not, however, handle elements of type set.
try:
value = ast.literal_eval(string)
except ValueError:
# A ValueError is raised by literal_eval if the string is not well
# formed. Catch it and print out a more readable statement.
msg = 'Argument {} does not evaluate to a `tuple` object.'.format(string)
raise ValueError(msg)
except SyntaxError:
# The only other error that may be raised is a `SyntaxError` because
# `literal_eval` calls the Python in-built `compile`. This error is
# caused by parsing issues.
msg = 'Error while parsing string: {}'.format(string)
raise ValueError(msg)
# Make sure we got a tuple. If not, its an error.
if isinstance(value, tuple):
return value
else:
raise ValueError('Expected a tuple argument, got {}'.format(type(value)))
|
dda0f9cf36b78fb792e2a30ccfc058ba3e6c4dde
| 406,308
|
def get_limited_to(headers):
    """Return the user and project the request should be limited to.

    :param headers: HTTP headers dictionary
    :return: A tuple of (user, project), set to None if there's no limit on
             one of these.
    """
    user = headers.get('X-User-Id')
    project = headers.get('X-Project-Id')
    return user, project
|
39ef977374be7473eb1b1b05c8d243b7b558a75e
| 348,019
|
def alphabets_input(min_length=None, max_length=None,
                    description="", filter_null=True):
    """
    Read an alphabetic string from standard input, filtered by length.

    Keeps prompting until the input is purely alphabetic and within the
    given length bounds.

    Parameters
    ----------
    min_length : int
        Minimum length of input. None means no minimum length.
    max_length : int
        Maximum length of input. None means no maximum length.
    description : str
        Prompt passed to input(), as in input(description).
    filter_null : bool
        If False, an empty input "" is returned as-is; otherwise the
        user is asked again.

    Returns
    -------
    alphabets_input : str
        Only-alphabet string, or "" (only if filter_null is False).
    """
    while True:
        entered = input(description)
        if not filter_null and entered == "":
            return ""
        if not entered.isalpha():
            continue
        too_short = min_length is not None and len(entered) < min_length
        too_long = max_length is not None and len(entered) > max_length
        if not (too_short or too_long):
            return entered
|
49a8f115dee6f05050c9204d972c08a0ad41a642
| 479,029
|
def trim_float(value: float, places: int = 2) -> float:
    """Trim a float to N decimal places.

    Args:
        value: float to trim
        places: decimal places to trim value to; a non-int places leaves
            the value unchanged
    """
    if not isinstance(places, int):
        return value
    return float(f"{value:.{places}f}")
|
1efff1d6cc271476cc6b512fda5d7c769ac738ac
| 136,292
|
def get_bag_of_communities(network, partition):
    """
    Count category occurrences per community.

    :param network: dictionary containing for each key (each node/page) a
        dictionary containing the page categories.
    :param partition: list (indexed like network's iteration order) or dict
        (keyed by page) of community assignments.
    :return: list of dictionaries, one per community; each maps the
        categories found in that community to the number of pages in the
        community carrying them.
    """
    num_communities = len(set(partition))
    bags = [{} for _ in range(num_communities)]
    partition_is_list = type(partition) == list
    for position, page in enumerate(network.keys()):
        community = partition[position] if partition_is_list else partition[page]
        bag = bags[community]
        for category in network[page]['categories']:
            bag[category] = bag.get(category, 0) + 1
    return bags
|
615e88393b30b1989d98eeea1ac7123588a51ea9
| 701,122
|
def message_history(request):
    """
    Returns message edit history for a message (parametrized fixture value).
    """
    history = request.param
    return history
|
5a2e32f001961378a98861796a586915cc2d0613
| 205,100
|
import math
def max_uv_distance(max_fact, radius_deg, freq_hz):
"""Function to evaluate maximum uv distance for given amplitude drop.
Follows equation 1 from:
ftp://ftp.cv.nrao.edu/NRAO-staff/bcotton/Obit/BLAverage.pdf
Args:
max_fact (float) : amplitude reduction factor
radius_deg (float) : Source / FoV radius
freq_hz (float) : Observation frequency, in Hz
Returns:
Maxium distance in the UV plane over which data can be averaged,
in metres.
"""
def inv_sinc(arg):
"""Newton-Raphson method for calculating arcsinc(x), from Obit."""
x1 = 0.001
for i in range(0, 1000):
x0 = x1
a = x0 * math.pi
x1 = x0 - ((math.sin(a) / a) - arg) / \
((a * math.cos(a) - math.pi * math.sin(a)) / (a**2))
if math.fabs(x1 - x0) < 1.0e-6:
break
return x1
delta_uv = inv_sinc(1.0 / max_fact) / (radius_deg * (math.pi / 180.))
wavelength = 299792458.0 / freq_hz
delta_uv *= wavelength # convert to metres
return delta_uv
|
1b17e17efb2d72ffc611c5a95e60b510eb1596b6
| 526,314
|
import yaml
def _load_yaml(path: str):
    """Parse the YAML file at *path* with the safe loader and return the data."""
    with open(path) as stream:
        # safe_load is exactly yaml.load(..., Loader=yaml.SafeLoader)
        return yaml.safe_load(stream)
|
0dd2db158727b27e54e4cce185c044ef0e725489
| 56,706
|
def is_sorted(lst):
    """Return True if *lst* is in non-decreasing order, False otherwise.

    The original implementation printed the whole list to stdout whenever
    an out-of-order pair was found — leftover debug output, removed here.
    Empty and single-element lists are trivially sorted.
    """
    return all(a <= b for a, b in zip(lst, lst[1:]))
|
42808bedaf69bb868a02e9efc5fcfc6a5799ea81
| 184,128
|
def ImportStoryIDs(path_to_file):
    """
    .. versionadded:: 0.1.0

    Read FanFiction.Net story-ids from a file (one id per line) and return
    them as a list of strings, in file order.

    :param path_to_file: path to a file containing story-ids from
                         FanFiction.Net, one per line.
    :type path_to_file: str.
    :returns: A list of strings representing the story-ids.
    :rtype: list of strings.

    Example:

    .. code-block:: bash

        $ cat data/Coraline/sids.txt
        123
        344

    .. code-block:: python

        import ffscraper as ffs
        sids = ffs.utils.ImportStoryIDs('data/Coraline/sids.txt')
        print(sids)  # ['123', '344']

    .. warning::
        Designed and tested with Unix-style paths and end-of-line
        characters; not thoroughly tested on Windows.
    """
    with open(path_to_file) as handle:
        contents = handle.read()
    return contents.splitlines()
|
350f4d993cfc1627a6b3185cfd24f213de6c62dc
| 316,415
|
def split_element(s, elements):
    """Split *s* into "prefix.rest" using a list of possible starting elements.

    Returns None when no element matches. When several elements match, the
    longest one wins; more than two matches is treated as an error.
    """
    matches = [prefix for prefix in elements if s.startswith(prefix)]
    if not matches:
        return
    if len(matches) > 2:
        raise Exception('Found more than 2 matching XML elements')
    # Prefer the longest matching element when two are found.
    longest = max(matches, key=len)
    return "%s.%s" % (s[0:len(longest)], s[len(longest):])
|
052918380e14c7b98fa68cb48d19cfa4a0b25ac9
| 540,089
|
import torch
def thwc_to_cthw(data: torch.Tensor) -> torch.Tensor:
    """
    Permute tensor from (time, height, width, channel) to
    (channel, time, height, width).

    Note: the previous docstring claimed the output layout was
    (channel, height, width, time); permute(3, 0, 1, 2) actually produces
    (channel, time, height, width), which matches the function name.
    """
    return data.permute(3, 0, 1, 2)
|
ac808780e29ac6104c2a37f9a2050eb2ebffd2ef
| 333,099
|
def apply_bitshift_scale(x, bitshift, do_round=True):
    """Scale the integer vector *x* by an arithmetic right shift.

    Parameters:
    - x: np.array(dtype=int), input array; note it is mutated in place
      when the rounding offset is applied
    - bitshift: int, number of bits to shift right (must be >= 0)
    - do_round: bool, add the rounding factor 1 << (bitshift - 1) first

    Returns: np.array(dtype=int), the shifted vector

    Raises: OverflowError if any value leaves the signed 32-bit range
    after the rounding offset is applied.
    """
    assert bitshift >= 0
    if do_round and bitshift > 0:
        # Rounding offset (unsigned variant; signed alternative kept below).
        #x += np.sign(x) * (1 << (bitshift - 1))
        x += (1 << (bitshift - 1))
    # Guard against values escaping the signed 32-bit range.
    int32_min = -(1 << 31)
    int32_max = (1 << 31) - 1
    if x.min() < int32_min or x.max() > int32_max:
        raise OverflowError()
    return x >> bitshift
|
27e25f967d91cb6f079360570ce284ade82973bc
| 256,544
|
def _columnExists(cursor, name, colname):
    """Tests whether a column exists in a table.

    :param cursor: open DB-API cursor; relies on ``cursor.rowcount`` being
        populated after ``execute`` (true for e.g. psycopg2/pymysql).
    :param name: fully qualified table name as ``"schema.table"``.
    :param colname: column name to look for.
    :return: True when information_schema reports the column, else False.

    NOTE(review): the query is built with ``str.format``, so the schema,
    table and column names are interpolated unescaped — SQL injection risk
    if any of them can come from untrusted input. Consider a parameterized
    query using this driver's paramstyle.
    """
    schemaname, tablename = name.split(".")
    sql = "select column_name from information_schema.columns where table_schema='{0}' and table_name='{1}' and column_name='{2}'".format(
        schemaname, tablename, colname)
    cursor.execute(sql)
    return bool(cursor.rowcount)
|
170e4a44a11a45f9921ea6e3ac0b365b6d010227
| 312,835
|
def validate(result, selectors):
    """Check whether a result satisfies all selectors.

    Parameters
    ----------
    result : (string, dictionary)
        A tuple describing a file with a valid header; the parsed header
        data is stored in the dictionary.
    selectors : dict
        Maps an attribute name to a selector: a list of "and" conditions,
        where each condition is a list of required items. A selector is
        satisfied when at least one of its conditions holds in full.

    Returns
    -------
    bool
        Whether the result satisfies every selector.
    """
    header = result[1]
    for attribute, selector in selectors.items():
        # Normalise the attribute value to a list ([] when missing,
        # scalar values wrapped in a singleton list).
        value = header.get(attribute, [])
        if not hasattr(value, "__len__"):
            value = [value]
        satisfied = any(
            all(item in value for item in condition)
            for condition in selector
        )
        if not satisfied:
            return False
    return True
|
a264526dd8af0317fdabf879157f2933561be9be
| 283,206
|
import errno
def _convert_errno_parm(code_should_be):
""" Convert the code_should_be value to an integer
If code_should_be isn't an integer, then try to use
the code_should_be value as a errno "name" and extract
the errno value from the errno module.
"""
try:
code = int(code_should_be)
except ValueError:
try:
code = getattr(errno, code_should_be)
except AttributeError:
raise AssertionError("code_should_be '%s' is not a valid errno name" % code_should_be)
return code
|
949c9f17539d885a0fc4a51f3358fc3695c42e22
| 702,711
|
def remove_nodes_from_graph_and_dict(nodes_to_remove, gsc_SC_KM, nodes_statuses_pre_treatment):
    """Return copies of the graph and status dict with the given nodes removed.

    Neither input is modified; shallow copies are edited instead. A node
    missing from either structure raises (KeyError / the graph's own error),
    as in the original behaviour.
    """
    pruned_graph = gsc_SC_KM.copy()
    pruned_statuses = nodes_statuses_pre_treatment.copy()
    for node in nodes_to_remove:
        pruned_graph.remove_node(node)
        pruned_statuses.pop(node)
    return pruned_graph, pruned_statuses
|
e888fc7b2133a5502e1e38f6a38e671c407af2f6
| 470,739
|
def pylong_join(count, digits_ptr='digits', join_type='unsigned long'):
    """
    Generate an unrolled shift-then-or loop over the first 'count' digits.
    Assumes that they fit into 'join_type'.

        (((d[2] << n) | d[1]) << n) | d[0]
    """
    pieces = []
    for index in range(count - 1, -1, -1):
        # The most significant digit(s) get the shift; digit 0 does not.
        shift = " << PyLong_SHIFT" if index else ''
        pieces.append("%s[%d])%s)" % (digits_ptr, index, shift))
    return "(" * (count * 2) + "(%s)" % join_type + " | ".join(pieces)
|
b3cda375fc2fbc922fcb7ecb7a4faa7bc581f7d8
| 691,745
|
import struct
def int_to_bytes(int_data):
    """
    Pack a 32-bit signed int as 4 little-endian bytes.
    :param int_data: value in the signed 32-bit range
    :return: bytes(); len == 4
    """
    return struct.pack('<i', int_data)
|
12df0d6bb2f55aa5b86629fbfc2a312855327903
| 313,644
|
import itertools
def sparse_dict_from_array(array, magnitude_threshold=0):
    """Convert an array to a dict of its nonzero entries keyed by index-tuple.

    Only entries whose absolute value exceeds *magnitude_threshold* are kept.
    """
    return {
        idx: array[idx]
        for idx in itertools.product(*(range(dim) for dim in array.shape))
        if abs(array[idx]) > magnitude_threshold
    }
|
26774934744c927f1625caedffbeb17203af68ea
| 685,051
|
def GetChrom(chrom):
    """
    Extract a numerical chromosome.

    Parameters
    ----------
    chrom : str
        Chromosome string, optionally prefixed with "chr"

    Returns
    -------
    chrom : int
        Integer value for the chromosome;
        X maps to 23, Y maps to 24
    """
    if "X" in chrom:
        return 23
    if "Y" in chrom:
        return 24
    # Strip a leading "chr" prefix before parsing the number.
    digits = chrom[3:] if chrom.startswith("chr") else chrom
    return int(digits)
|
61456e1192d12ef97fd185e85e122f79d74433ad
| 521,374
|
def camel_to_snake(name):
    """
    Convert a camelCase name to snake_case.

    Hyphens are removed first; used in the Fleur parser to convert
    attribute names coming from the xml files.
    """
    cleaned = name.replace('-', '')
    pieces = []
    for ch in cleaned:
        if ch.isupper():
            pieces.append('_' + ch.lower())
        else:
            pieces.append(ch)
    return ''.join(pieces).lstrip('_')
|
4ce20f914fefb33a41ca0a0a5fcba27f0b3a84ba
| 101,153
|
def first_signal(stock_data, base_price, gain_loss_threshold):
    """Detect the first market signal after certain days.

    Scans *stock_data* in order: returns 1 as soon as a day's high exceeds
    ``base_price * (1 + gain_loss_threshold)``, -1 as soon as a day's low
    falls below ``base_price * (1 - gain_loss_threshold)``, and 0 when
    neither ever happens.

    :param stock_data: list of dicts, each with keys "high" and "low"
    :param base_price: the base price being compared against
    :param gain_loss_threshold: fractional sell/buy trigger level
    :return: 1, 0, or -1
    """
    upper = base_price * (1 + gain_loss_threshold)
    lower = base_price * (1 - gain_loss_threshold)
    for day in stock_data:
        if day["high"] > upper:
            return 1
        if day["low"] < lower:
            return -1
    return 0
|
d15a44b792866c6ed012ceb3e6e1f0c93fd33322
| 523,006
|
def validate_ip(ip):
    """Validate that a string has a correct dotted-quad IPv4 format.

    Args:
        ip (str): The string to be verified.
    Returns:
        ip (str): The string, unchanged, when it is a valid IP.
    Raises:
        ValueError: if the string is not a valid IP. Previously a
            non-numeric octet (e.g. "a.b.c.d") leaked the raw ``int()``
            ValueError instead of the documented message; that is now
            normalised to "[!] Invalid IP!" as well.
    """
    octets = str(ip).split('.')
    if len(octets) != 4:
        raise ValueError("[!] Invalid IP!")
    for octet in octets:
        try:
            value = int(octet)
        except ValueError:
            # Non-numeric octet: report with the documented message.
            raise ValueError("[!] Invalid IP!") from None
        # Reject out-of-range values and leading zeros (e.g. "01").
        if value < 0 or value > 255 or (len(octet) > 1 and octet.startswith('0')):
            raise ValueError("[!] Invalid IP!")
    return ip
|
1f316edc1d621b1cd4f49049c9e208822c7157f9
| 211,709
|
import time
def get_s3_bucket_and_dir(bucket_arg):
    """Split the ``s3_bucket`` argument into bucket name and upload path.

    Bucket only: ``my-bucket`` — a default ``possum-<timestamp>`` path is
    generated. Bucket with path: ``my-bucket/my-path``.

    :param str bucket_arg: The string passed for the ``s3_bucket`` argument.
    :returns: Tuple containing the bucket name and path
    :rtype: tuple
    """
    bucket, separator, path = bucket_arg.partition('/')
    if not separator:
        # No explicit path: use a timestamped default directory name.
        path = f'possum-{int(time.time())}'
    return bucket, path
|
5dee8396f9d7e6e218053dd8e30bc0e29b4452ed
| 344,698
|
def get_cloudgov_service_creds_by_instance_name(services, instance_name):
    """Retrieve credentials for a bound Cloud.gov service by instance name.

    Returns the first matching service's ``credentials`` dict, or {} when
    no service matches or the match carries no credentials.
    """
    for service in services:
        if service.get('instance_name') == instance_name:
            return service.get('credentials', {})
    return {}
|
872faaa80294d5b69b44c5d282e82109532080a9
| 223,586
|
def compare_partial_dicts(result, expected):
    """
    Make sure every key of *expected* is present in *result* with an equal
    value; *result* may contain extra items, which are ignored.
    Each mismatch is printed and counted; returns True only when none occur.
    Used in the test_lvs, test_pvs and test_vgs tests.
    """
    mismatch_count = 0
    for key in expected.keys():
        if result[key] != expected[key]:
            print("Failed for key {k}, {r} != {e}".format(k=key, r=result[key], e=expected[key]))
            mismatch_count += 1
    return mismatch_count == 0
|
88c16fdf45b1e8fe917f2d23f0b05c0bcf6b7b6b
| 694,393
|
def with_graph(f):
    """Decorator: call *f* inside ``model.graph.as_default()``.

    The first positional argument of the decorated function must be the
    model instance exposing a ``graph`` attribute whose ``as_default()``
    returns a context manager. ``functools.wraps`` is applied so the
    wrapper keeps the original function's name and docstring (the
    previous version lost them).
    """
    import functools

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # the first argument is always the model instance
        model = args[0]
        with model.graph.as_default():
            return f(*args, **kwargs)
    return wrapper
|
2f079a76ada7ce25b12b2a98b2c89661d69616c1
| 61,784
|
import re
def get_relvaldata_id(file):
    """Return the unique relvaldata ID (run-id, run-name) for *file*.

    Returns None when either the nine-digit run id or the RelVal run name
    cannot be found in the file name.
    """
    run_id_match = re.search(r'R\d{9}', file)
    name_match = re.search(r'_RelVal_([\w\d]*)-v\d__', file)
    if name_match is None:
        # Fall back to the GR_R-style naming scheme.
        name_match = re.search(r'GR_R_\d*_V\d*C?_([\w\d]*)-v\d__', file)
    if run_id_match and name_match:
        return (run_id_match.group(), name_match.group(1))
    return None
|
4c5444ad6c769c49778d572d5d3559c94a2460e5
| 668,972
|
def evaluate(node, case):
    """
    Recursively evaluate an expression-tree node.

    :param node: Evaluated node; ``node[0]`` is the symbol, ``node[1:]``
        the child nodes.
    :type node: list
    :param case: Current fitness case (variable values)
    :type case: list
    :returns: Value of the evaluation
    :rtype: float
    """
    symbol = node[0]
    if symbol in ("+", "-", "*", "/"):
        # Binary operator: evaluate children left-to-right.
        left = evaluate(node[1], case)
        right = evaluate(node[2], case)
        if symbol == "+":
            return left + right
        if symbol == "-":
            return left - right
        if symbol == "*":
            return left * right
        # Protected division: near-zero denominators are replaced by 1,
        # so the numerator is returned unchanged.
        if abs(right) < 0.00001:
            right = 1
        return left / right
    if symbol.startswith("x"):
        # Variable terminal: index into the fitness case.
        return case[int(symbol[1:])]
    # Constant terminal.
    return float(symbol)
|
ddcb3aad9e3e1515db99e49e1d1e61e2d41e6d0e
| 545,795
|
def range_check(low, high):
    """
    Validate that the given range has *low* strictly below *high*.

    Both bounds are converted to float. A ValueError is raised when
    low >= high; otherwise the pair is returned as floats.
    """
    low = float(low)
    high = float(high)
    if low >= high:
        raise ValueError('low >= high')
    return low, high
|
1187379656e08f72c3c71d0e85d4b47a0fa9f24f
| 470,003
|
def apply_mask(image, mask):
    """
    Apply a mask to an image, in place.

    The image should be N x M x 3; the mask an N x M array of 0.0/1.0
    values. Each colour channel is multiplied elementwise by the mask.
    The (mutated) image is also returned.
    """
    for channel in range(3):
        image[:, :, channel] = image[:, :, channel] * mask
    return image
|
71ce30a10e8b7ab58aa410ace63780040ebd82e0
| 281,402
|
def build_raw_view(lines_layout, stats_human, no_stat_human='-'):
    """Render each layout template with the human-readable stats.

    Templates whose placeholders are missing from *stats_human* produce
    *no_stat_human* instead.
    """
    rendered = []
    for template in lines_layout:
        try:
            line = template.format(**stats_human)
        except KeyError:
            line = no_stat_human
        rendered.append(line)
    return rendered
|
6454131f8571bee6c69f57ff6ca129a87727acb4
| 577,790
|
def _countries_to_dict(ls: list) -> dict:
"""
convert countries db in the form of a list to a dict
:param ls: list
:return: dict
"""
res = dict()
for i in ls:
res[i[0]] = i[1]
return res
|
9bef8410c037f0adc3dcc7741fb661739e454c8e
| 118,945
|
def GetXMLTag(node):
    """Return the XML tag name of the given element node."""
    return node.tag
|
cd2d15db62ec4d883887914b0f9056af1da8b48e
| 330,952
|
import collections
def array_to_points(points):
    """Pair up a flat sequence of floats into (lat, long) 2-tuples.

    An odd number of values raises IndexError (as before, when the second
    member of the final pair is missing).
    """
    coords = list(points)
    return [(coords[i], coords[i + 1]) for i in range(0, len(coords), 2)]
|
00f3e14894f38bc53597e55adce683fc35495ce4
| 95,853
|
def pretty_str(element, encoding="us-ascii", xml_declaration=True, indent=4):
    """
    Gets a string of the provided XML element.

    Args:
        element (xml.etree.ElementTree.Element): The element to get as a string.
        encoding (str): The encoding written into the XML declaration.
        xml_declaration (bool): If the declaration line is required or not.
        indent (int): The number of spaces to use in the indentation.

    Returns:
        str -- A pretty string ready to be written in a file.
    """
    def print_node(nb_indents, node):
        # Serialize one node (and its subtree) at the given indent depth.
        node_str = " " * indent * nb_indents
        children = list(node)
        has_children = len(children) > 0
        # Self-closing tag only when there is no text and no children.
        close_it = (node.text is None or node.text == "") and not has_children
        node_str += "<" + str(node.tag)
        for name, value in node.items():
            node_str += " " + name + '="' + str(value) + '"'
        if close_it:
            node_str += "/>\n"
        elif not has_children:
            node_str += ">" + str(node.text) + "</" + node.tag + ">\n"
        else:
            node_str += ">\n"
            for child in children:
                node_str += print_node(nb_indents + 1, child)
            node_str += " " * indent * nb_indents + "</" + node.tag + ">\n"
        return node_str

    result = ""
    # XML declaration and encoding:
    if xml_declaration:
        result += "<?xml version='1.0'"
        if encoding:
            # BUG FIX: the encoding was previously hard-coded to
            # 'us-ascii' regardless of the `encoding` argument.
            result += " encoding='%s'" % encoding
        result += "?>\n"
    # Tree:
    result += print_node(0, element)
    return result
|
a4c08eb04a31f7c53e0a90c57fb338bf63ae2204
| 54,877
|
def update_docstring_references(obj, ref="ref"):
    """
    Replace the placeholder *ref* in an object's docstring with the
    object's (module-qualified) name, and return the same object.

    Sphinx likes unique names — specifically for citations, not so much
    for footnotes — so each function/class gets its own reference names.

    Parameters
    -----------
    obj : :class:`func` | :class:`class`
        Class or function for which to update documentation references.
    ref : :class:`str`
        String to replace with the object name.

    Returns
    -------
    :class:`func` | :class:`class`
        Object with modified docstring.
    """
    qualified = obj.__name__
    if hasattr(obj, "__module__"):
        qualified = obj.__module__ + "." + qualified
    obj.__doc__ = str(obj.__doc__).replace(ref, qualified)
    return obj
|
78a109838573d5dc42027da8a8998ff107b8ed2a
| 677,540
|
def blockshaped(arr, nrows, ncols):
    """
    Split a 2D array into shape (n, nrows, ncols), where
    n * nrows * ncols = arr.size.

    Each of the n sub-blocks preserves the "physical" layout of arr.
    """
    h, w = arr.shape
    # Group rows into bands of nrows, columns into bands of ncols ...
    row_bands = arr.reshape(h // nrows, nrows, -1, ncols)
    # ... then interleave so each (nrows, ncols) tile is contiguous.
    tiles = row_bands.swapaxes(1, 2)
    return tiles.reshape(-1, nrows, ncols)
|
c30a2034aea460fb73604b0b050dc85e1c054bd1
| 391,998
|
import requests
def request_ct(url):
    """Perform a GET request with a (somewhat) more useful error message.

    Only network/HTTP-level failures from ``requests`` are translated into
    IOError; the previous bare ``except:`` also swallowed programming
    errors and KeyboardInterrupt. The original exception is chained as
    the cause for debuggability.
    """
    try:
        response = requests.get(url)
    except requests.exceptions.RequestException as exc:
        raise IOError(
            "Couldn't retrieve the data, check your search expression or try again later."
        ) from exc
    else:
        return response
|
0c1299724ae1141358349d4fd7b98b94c9a3154f
| 626,708
|
def _signature_map(map_dict, parsed_sig):
"""Map values found in parsed gufunc signature.
Parameters
----------
map_dict : dict of str to int
Mapping from `str` dimension names to `int`. All strings in
`parsed_sig` must have entries in `map_dict`.
parsed_sig : list-like of tuples of str
gufunc signature that has already been parsed, e.g., using
`parse_gufunc_signature`.
Returns
-------
shapes : list of tuples of int
list of tuples where each tuple is the shape of an argument.
"""
shapes = [tuple(map_dict[k] for k in arg) for arg in parsed_sig]
return shapes
|
80139e1d1c4b9475d3a84645035cef6e3308ed46
| 509,512
|
def get_referer(self, request):
    """
    Retrieve the referer value (URL) for the current request.

    The value comes straight from the client's "Referer" header, so it
    must not be trusted and may be absent.

    :type request: Request
    :param request: The request to be used.
    :rtype: String
    :return: The retrieved referer value (URL).
    """
    return request.get_header("Referer")
|
e792ff9a1c7c1d8f5a6124c8b62a881545efcc47
| 241,505
|
def _build_params(dt):
"""Takes a date and builds the parameters needed to scrape the dgm website.
:param dt: the `datetime` object
:returns: the `params` that contain the needed api information
"""
params = (('yr', dt.year),
('month', dt.month),
('dy', dt.day),
('cid', 'mc-0191cbfb6d82b4fdb92b8847a2046366'))
return params
|
477f763d81407f047ada9d4d16c0207ed0b5ad67
| 696,450
|
def length(iterator):
    """Count the items produced by *iterator*.

    Note that the iterator is consumed in the process.
    """
    count = 0
    for _ in iterator:
        count += 1
    return count
|
0f63a687d9e3af1bb6f11057ce9e02cedfc0862b
| 379,843
|
def unique_edge_sizes(H):
    """Return the distinct edge sizes of a hypergraph.

    Parameters
    ----------
    H : Hypergraph object
        The hypergraph of interest

    Returns
    -------
    list
        The unique edge sizes
    """
    sizes = set()
    for edge in H.edges:
        sizes.add(len(H.edges.members(edge)))
    return list(sizes)
|
cfa1bcca6f2e55f9986e1f81a23ab9d7c94fb464
| 632,925
|
from pathlib import Path
from typing import Tuple
def _get_cache_info(path: Path) -> Tuple[float, int]:
"""
Returns a tuple of (modified_time, file_size) for the path; this tuple is
persisted to the cache, and we check the files on disk against this cached
value to determine if we need to format the file again
"""
stat = path.resolve().stat()
file_info = (stat.st_mtime, stat.st_size)
return file_info
|
ac1497ade9bb385e8f03fcb9e7f298058b9d6721
| 203,894
|
def get_number_of_outputs(output_data):
    """Get the number of output variables for a given output array.

    A 1-D array counts as a single output; otherwise the second dimension
    gives the number of outputs. Raises AttributeError when the input has
    no ``shape`` attribute.
    """
    if not hasattr(output_data, 'shape'):
        raise AttributeError(
            'Output data types must have attribute "shape".'
        )
    shape = output_data.shape
    return 1 if len(shape) == 1 else shape[1]
|
efbe1c4da0b1041bbdd03b06ef6cbeccc5eac4b6
| 305,720
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.