| content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
|---|---|---|
import inspect
import functools
def varargin(f):
""" Decorator to make a function able to ignore named parameters not declared in its
definition.
Arguments:
f (function): Original function.
Usage:
@varargin
def my_f(x):
# ...
is equivalent to
def my_f(x, **kwargs):
#...
Using the decorator is recommended because it makes it explicit that my_f won't
use arguments received in the kwargs dictionary.
"""
# Find the name of parameters expected by f
f_params = inspect.signature(f).parameters.values()
param_names = [p.name for p in f_params] # name of parameters expected by f
receives_kwargs = any(
[p.kind == inspect.Parameter.VAR_KEYWORD for p in f_params]
) # f receives a dictionary of **kwargs
@functools.wraps(f)
def wrapper(*args, **kwargs):
if not receives_kwargs:
# Ignore named parameters not expected by f
kwargs = {k: kwargs[k] for k in kwargs.keys() if k in param_names}
return f(*args, **kwargs)
return wrapper
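# Usage sketch (hypothetical): keyword arguments not declared by my_f
# are silently dropped by the decorator.
@varargin
def my_f(x):
    return x * 2

print(my_f(3, unused=1, verbose=True))  # 6; 'unused' and 'verbose' are ignored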
|
136658a58f0f9acab41d2969fdbec4e35e6b0de7
| 49,667
|
def createId(df1, dest_col, cols):
"""
Append flight IDs to the input table (usually the FSU table).
Parameters:
df1 (pandas.DataFrame) : input dataframe to enhance with flight identifiers
        dest_col (string) : name of the identifier column
cols (list(string)) : columns from which to form the identifier
Return:
A dataframe with one additional identifier column.
The original input remains intact. The number of rows in the returned dataframe
is identical to the number of rows in the input dataframe.
"""
df = df1.copy()
df[dest_col] = df[cols[0]]
for i in range(1,len(cols)):
df[dest_col] += df[cols[i]]
return df
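# A small illustration with hypothetical column names; the identifier is
# formed by string-concatenating the listed columns.
import pandas as pd

fsu = pd.DataFrame({'carrier': ['AA', 'DL'], 'flight': ['0100', '0200']})
print(createId(fsu, 'flight_id', ['carrier', 'flight']))
#   carrier flight flight_id
# 0      AA   0100    AA0100
# 1      DL   0200    DL0200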
|
87777a104d531d5e3482e72fd6085be010662d16
| 49,672
|
def path_from_node_to_root(root, letter):
"""Recursive formula to reach a node
Args:
root: root node
letter: letter to find
Returns:
list: list of the path
"""
if root is None:
return None
elif root.get_letter() == letter:
return []
left_answer = path_from_node_to_root(root.left, letter)
if left_answer is not None:
left_answer.append(0)
return left_answer
right_answer = path_from_node_to_root(root.right, letter)
if right_answer is not None:
right_answer.append(1)
return right_answer
return None
|
2a9027d456869061a074331d10f7e2d075b2eea1
| 49,674
|
def potential_energy(fa, voltages, position, charge):
""" The potential energy of a charged particle in an electric field.
Parameters
----------
fa :: FastAdjust
voltages :: np.array([v0, v1, ... vn]) (V)
position :: np.array([x, y, z]) (m)
charge :: float64 (C)
Returns
-------
float64
"""
return charge * fa.potential_r(position, voltages)
|
9cd25052558e2ae5fd7ffd273c6b53a824c1ac6e
| 49,675
|
def personal_id(request):
"""Get the OPN personal profile ID for the authenticated profile."""
wallet_info = request.wallet_info
if 'personal_profile' in wallet_info:
return wallet_info['personal_profile']['id']
profile = wallet_info['profile']
if profile['is_individual']:
return profile['id']
# Personal profile not provided. Old version of OPN?
return ''
|
cf6a8c6a8512326e0933a6ddf02f1a5fc72b21b2
| 49,680
|
def construct_workflow_name(example, workflow_engine):
"""Construct suitable workflow name for given REANA example.
:param example: REANA example (e.g. reana-demo-root6-roofit)
:param workflow_engine: workflow engine to use (cwl, serial, yadage)
:type example: str
:type workflow_engine: str
"""
output = '{0}.{1}'.format(example.replace('reana-demo-', ''),
workflow_engine)
return output
|
b7b1d4bbde6e5d98815c1d3898b2c4e009ce13ac
| 49,682
|
def list_paragraph_styles(d):
"""
Name: list_paragraph_styles
Inputs: docx.document.Document, open word document
    Output: dict keyed by style_id, each value holding the style name and count
    Features: Returns a dict of all the paragraph styles found in given doc
"""
style_dict = {}
    for para in d.paragraphs:
if para.style.style_id not in style_dict:
style_dict[para.style.style_id] = {
'name': para.style.name,
'count': 1
}
else:
style_dict[para.style.style_id]['count'] += 1
return style_dict
|
26d45798c648c5fb46eb16c3e1bea790323333c3
| 49,683
|
def ka_C_Binary_ratio(y, positive=1):
"""Find the positive ration of dependent variable
Parameters
----------
y: pandas series
binary dependent variable
positive: 1 or 0
identify which value is positive
Return
------
    float value displaying the positive rate
"""
return y.value_counts()[positive] / (y.value_counts().sum())
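# A minimal sketch: three positives out of five observations -> 0.6.
import pandas as pd

y = pd.Series([1, 0, 1, 1, 0])
print(ka_C_Binary_ratio(y))  # 0.6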
|
bb1cedf43764e0d66baee318ed104be522c12afe
| 49,687
|
from typing import Iterable
from typing import Sequence
def get_unique_list_in_order(list_of_lists: Iterable[Sequence]):
"""Gets unique items from a list of lists, in the order of the appearance
Args:
list_of_lists: list of lists
Returns: list of unique items in order of appearance
"""
output = []
seen = set()
for ls in list_of_lists:
for elem in ls:
if elem not in seen:
output.append(elem)
seen.add(elem)
return output
|
959c61a9a82bd7d0ee1fab63615ea979901cc0ec
| 49,691
|
import yaml
def read_config(fname="params.yaml"):
"""Function to read and return config params from yaml file
Args:
fname (str) : File name for yaml file
Returns:
dict : Dictionary with parameters from yaml file
"""
with open(fname, "r") as fs:
try:
return yaml.safe_load(fs)['Feature_Selection']
except yaml.YAMLError as exc:
print(exc)
return
|
b3c34ec2872dddd7ee4bd4d2d3b2ded15d727358
| 49,692
|
def find_uninflected_stem(stem, form):
"""
    Finds all the shared characters from left to right.
find_uninflected_stem('rAmaH', 'rAmo') => -1+aH
:param stem: form to reach by applying the diff
:param form: given form
:return: a diff: '-<number of chars to delete>+<characters to add>'
"""
i = 0
while i <= len(stem) - 1 and i <= len(form) - 1 and stem[i] == form[i]:
i += 1
stem_ending = stem[i:]
form_ending = form[i:]
if stem_ending == '' and form_ending == '':
operation = ''
else:
form_ending_len = len(form_ending)
operation = '-{}+{}'.format(form_ending_len, stem_ending)
return operation
|
db66368a4bf01d02c046bc2f5c3a208097904358
| 49,696
|
def calc_yd_line_int(play):
"""
Calculates the yard line as an integer b/w 0 - 100,
    where 0 - 50 represents the possessing team's side of the field,
    and 50 - 100 represents the opponent's side.
"""
if play.data['yrdln'] == '':
return None
if play.data['yrdln'] == '50':
return 50
side, yrdln = play.data['yrdln'].split(' ')
yrdln = int(yrdln)
if play.data['posteam'] == side:
return yrdln
else:
return 100 - yrdln
|
6c89bf6fd8b37bc298f467c43ed8f0d5692204d0
| 49,697
|
def delete_note(client, note_id):
"""Delete a note"""
return client.post("/delete", data=dict(
note_id=note_id
), follow_redirects=True)
|
5c9c3462223d4c96deb393a6d4657ce4ac7d38a2
| 49,700
|
def titleize(s: str) -> str:
"""
Titleizes a string, aka transforms it from underscored to English title
format
"""
s = s.replace('_', ' ')
words = s.split(' ')
new_words = [
w[0].upper() + w[1:]
for w in words
]
return ' '.join(new_words)
|
7f94a10abec10b19950f687251683a5c07166ff3
| 49,703
|
def _get_tags() -> list[str]:
"""Return test tags.
:return: Tag list.
"""
return ["Test Tag 1", "Test Tag 2"]
|
79f04b17da4e3df3c2a2572980bdc0ca18d4f796
| 49,704
|
def int2fixed(i):
"""Convert an integer to fixed point"""
return i << 16
|
7505fa97238dd2440b4aaf46483fc452a145e68a
| 49,707
|
import torch
def convert_tensors_recursively_to(val, *args, **kwargs):
""" Applies `.to(*args, **kwargs)` to each tensor inside val tree. Other values remain the same."""
if isinstance(val, torch.Tensor):
return val.to(*args, **kwargs)
if isinstance(val, (tuple, list)):
return type(val)(convert_tensors_recursively_to(item, *args, **kwargs) for item in val)
return val
|
76525788cf89732ef2eed2822ac98e2be0fe14a6
| 49,709
|
def read_country_code_file(country_code_file_path):
"""
function to read in file containing country codes
"""
with open(country_code_file_path) as file:
country_codes = [x.rstrip() for x in file]
return country_codes
|
c8aa3f8e0540a2cc55c6b976421c02dcbfc22d49
| 49,712
|
import torch
def detach(input):
"""
Detach tensor from calculation graph.
Examples::
>>> import torch
>>> import treetensor.torch as ttorch
>>> tt = ttorch.randn({
... 'a': (2, 3),
... 'b': {'x': (3, 4)},
... })
>>> tt.requires_grad_(True)
>>> tt
<Tensor 0x7f5881338eb8>
├── a --> tensor([[ 2.5262, 0.7398, 0.7966],
│ [ 1.3164, 1.2248, -2.2494]], requires_grad=True)
└── b --> <Tensor 0x7f5881338e10>
└── x --> tensor([[ 0.3578, 0.4611, -0.6668, 0.5356],
[-1.4392, -1.2899, -0.0394, 0.8457],
[ 0.4492, -0.5188, -0.2375, -1.2649]], requires_grad=True)
>>> ttorch.detach(tt)
<Tensor 0x7f588133a588>
├── a --> tensor([[ 2.5262, 0.7398, 0.7966],
│ [ 1.3164, 1.2248, -2.2494]])
└── b --> <Tensor 0x7f588133a4e0>
└── x --> tensor([[ 0.3578, 0.4611, -0.6668, 0.5356],
[-1.4392, -1.2899, -0.0394, 0.8457],
[ 0.4492, -0.5188, -0.2375, -1.2649]])
"""
return torch.detach(input)
|
a35f70ac7401549ced5a4998c44d87b1b08c0c3b
| 49,717
|
import copy
def execute_graph_from_context(graph, context, *targets, inplace=False):
"""Execute a graph up to a target given a context.
Parameters
----------
graph : grapes Graph
Graph of the computation.
context : dict
Dictionary of the initial context of the computation (input).
targets : strings (or keys in the graph)
Indicator of what to compute (desired output).
inplace : bool
Whether to modify graph and context inplace (default: False).
Returns
-------
grapes Graph
Graph with context updated after computation.
"""
if not inplace:
graph = copy.deepcopy(graph)
context = copy.deepcopy(context)
graph.set_internal_context(context)
graph.execute_to_targets(*targets)
return graph
|
e27fcc2ed83b88821a496a996e94e569c8b0d86c
| 49,720
|
def _has_valid_shape(table):
"""Returns true if table has a rectangular shape."""
if not table.columns:
return False
if not table.rows:
return False
num_columns = len(table.columns)
for row in table.rows:
if len(row.cells) != num_columns:
return False
return True
|
8b51f96b46ae0d8b586df68ebe192410b390273d
| 49,724
|
def extract_yields_stats(yields):
"""Extract coverage, mean and std of yields."""
coverage_mask = (yields != 0.0)
return coverage_mask.mean(), yields[coverage_mask].mean(), yields[coverage_mask].std()
|
750a842dbf2081744c9031278bfd1a2d9b544007
| 49,725
|
import base64
def b64url_decode(data):
"""Decode data encoded as base64-URL"""
datalen = len(data)
# Recover base64 padding
if datalen % 4 == 3:
data += '='
elif datalen % 4 == 2:
data += '=='
return base64.b64decode(data, altchars='-_')
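# Sketch: 'aGVsbG8' is the unpadded base64url encoding of b'hello'
# (length 7, so 7 % 4 == 3 and one '=' is restored before decoding).
print(b64url_decode('aGVsbG8'))  # b'hello'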
|
c7a97da31c600f3a92e20af7be945b4442c4d2b9
| 49,726
|
def riemann_sum(func, partition):
"""
Compute the Riemann sum for a given partition
Inputs:
- func: any single variable function
- partition: list of the form [(left, right, midpoint)]
Outputs:
- a float
"""
return sum(func(point) * (right - left) for left, right, point in partition)
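# Worked example: midpoint sum for f(x) = x**2 on [0, 1] with two
# subintervals: 0.5 * 0.25**2 + 0.5 * 0.75**2 = 0.3125 (exact value is 1/3).
partition = [(0.0, 0.5, 0.25), (0.5, 1.0, 0.75)]
print(riemann_sum(lambda x: x ** 2, partition))  # 0.3125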
|
06bed21fc0a8c95440d5e53c12148af8b19f0943
| 49,727
|
def find_variables(param_dict):
"""Finds items in dictionary that are lists and treat them as variables."""
variables = []
for key, val in param_dict.items():
if isinstance(val, list):
variables.append(key)
return variables
|
8e9c35ff85a7cea92a03aa88436999d11f64a21f
| 49,730
|
import math
def dew_point(T, RH=None):
"""
Given the relative humidity and the dry bulb (actual) temperature,
calculates the dew point (one-minute average).
    The constant b is dimensionless, c and d are in degrees
    celsius.
Using the equation from:
Buck, A. L. (1981), "New equations for computing vapor pressure
and enhancement factor", J. Appl. Meteorol. 20: 1527-1532
"""
if RH is None:
return "0.0"
d = 234.5
if T > 0:
# Use the set of constants for 0 <= T <= 50 for <= 0.05% accuracy.
b = 17.368
c = 238.88
else:
# Use the set of constants for -40 <= T <= 0 for <= 0.06% accuracy.
b = 17.966
c = 247.15
gamma = math.log(RH / 100 * math.exp((b - (T / d)) * (T / (c + T))))
return "%.2f" % ((c * gamma) / (b - gamma))
|
ce619d5b33c0108c14303202507dc98f86aa9561
| 49,731
|
def get_substr_slices(umi_length, idx_size):
"""
Create slices to split a UMI into approximately equal size substrings
Returns a list of tuples that can be passed to slice function.
"""
cs, r = divmod(umi_length, idx_size)
sub_sizes = [cs + 1] * r + [cs] * (idx_size - r)
offset = 0
slices = []
for s in sub_sizes:
slices.append((offset, offset + s))
offset += s
return slices
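# Sketch: a 10-character UMI split into 3 near-equal slices.
print(get_substr_slices(10, 3))  # [(0, 4), (4, 7), (7, 10)]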
|
ade4601b74b38d5ef26dd8cfeb89c652b14a447a
| 49,733
|
def capwords(s, sep=None):
"""capwords(s, [sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. Note that this replaces runs of whitespace characters by
a single space.
"""
return (sep or ' ').join([x.capitalize() for x in s.split(sep)])
|
6e25788a3c257d5b6a9ee9484a3712580b7ea2ae
| 49,735
|
def df_level(value, bounds="80:95"):
"""Convert a numeric value to "success", "warning" or "danger".
The two required bounds are given by a string.
"""
# noinspection PyTypeChecker
warning, error = [float(x) for x in bounds.split(":")]
if value < warning <= error or error <= warning < value:
return "success"
elif warning <= value < error or error < value <= warning:
return "warning"
return "danger"
|
ca43024929f5d8a7906eafc32f06994975825563
| 49,738
|
def get_offset(im, point, x_fov=1, y_fov=1):
"""
Calculate the angular offset from centre of the point, based on given
angular field of view for camera.
"""
height, width = im.shape
xpos, ypos = point
offset_x = xpos - width/2
offset_y = ypos - height/2
xscale = x_fov / width
yscale = y_fov / height
return xscale * offset_x, yscale * offset_y
|
798489c1de43c26f8a3ab1a2fd23abd01c273a96
| 49,739
|
def getVelIntSpec(mcTable, mcTable_binned, variable):
"""
Calculates the integrated reflectivity for each velocity bin
Parameters
----------
mcTable: McSnow output returned from calcParticleZe()
mcTable_binned: McSnow table output binned for a given velocity bin
    variable: name of column variable which will be integrated over a velocity bin
Returns
-------
mcTableVelIntegrated: table with the integrated reflectivity for each velocity bin
"""
mcTableVelIntegrated = mcTable.groupby(mcTable_binned)[variable].agg(['sum'])
return mcTableVelIntegrated
|
e232268ba81df0d21c9103ea49a7a786e9371dcc
| 49,744
|
def iterable(obj):
"""Check if object is iterable"""
    try:
        iter(obj)
    except Exception:
        return False
    else:
        return True
|
ec4f79f62edfcdf5f1dd8fb9da0feb6ce51b7ce4
| 49,748
|
import pickle
def pickle_loader(path):
"""A loader for pickle files that contain a sample
Args:
        path: Path to a pickle file containing a sample
Returns:
sample: A sample
"""
with open(path, 'rb') as pkl:
sample = pickle.load(pkl)
return sample
|
4ffe1822570e9e9a499076b592f7046f045d4baa
| 49,751
|
import random
import math
def pick_action(s, values, epsilon):
"""
Chooses an action for s based on an epsilon greedy strategy
:param s: the state being evaluated
    :param values: The Q-values for all s-a pairs, nested Dict
    :param epsilon: the threshold for random choice, governing exploration vs. exploitation
    :return: the chosen action
"""
    if random.random() < epsilon:
        # random.choice needs a sequence, so materialise the dict keys as a list
        return random.choice(list(values[s].keys()))
max_q_val = -math.inf
max_action = None
for action, value in values[s].items():
if max_q_val < value:
max_q_val = value
max_action = action
return max_action
|
dc305e32cdb57d3c3d59a574c2fa466b460cdd16
| 49,753
|
import torch
def synthetic_data(w, b, num_examples):
"""
    Generate labels from the true w and true b.
    num_examples is the number of samples to generate.
    y = Xw + b + noise
"""
x = torch.randn(num_examples, len(w))
y = torch.matmul(x, w) + b
# noise
noise = torch.normal(0, 0.01, y.shape)
y += noise
return x, y.reshape(-1, 1)
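# Usage sketch: 1000 noisy samples around assumed true parameters.
true_w = torch.tensor([2.0, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
print(features.shape, labels.shape)  # torch.Size([1000, 2]) torch.Size([1000, 1])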
|
5101f5f014f9e90529ed958987ebedcfa1bc55ac
| 49,755
|
def DESS(inString):
"""Decode a single string into two strings (inverse of ESS).
DESS is an acronym for DEcode from Single String. This function
uses the method suggested in the textbook for converting a single
string that encodes two strings back into the original two
strings. DESS is the inverse of the function ESS.
Args:
inString (str): The string to be decoded
Returns:
(str, str): A 2-tuple containing the two strings that were decoded from the input.
Example:
>>> DESS('3 abcdefg')
('abc', 'defg')
"""
# split on the first space character
(theLength, remainder) = inString.split(" ", 1)
inString1 = remainder[: int(theLength)]
inString2 = remainder[int(theLength) :]
return (inString1, inString2)
|
9db440e6449ef351c2c7a22d285e86ca2ef02348
| 49,765
|
def intersection(rectangle1,rectangle2):
"""
    return True or False depending on whether rectangle1 and rectangle2 intersect
Note: rectangles may be single points as well, with xupper=xlower etc.
arguments:
rectangleX: list [xlower,ylower,xupper,yupper]
"""
xl1 = rectangle1[0]
yl1 = rectangle1[1]
xu1 = rectangle1[2]
yu1 = rectangle1[3]
xl2 = rectangle2[0]
yl2 = rectangle2[1]
xu2 = rectangle2[2]
yu2 = rectangle2[3]
    nonintersection = (xl1 > xu2) or (xl2 > xu1) or (yl1 > yu2) or (yl2 > yu1)
    return not nonintersection
|
c0c6ee934a768cab49abce9cb92f5ecb6c715093
| 49,768
|
from typing import List
def load_lpc_deu_news_2015_100K_sents() -> List[str]:
"""Load LPC sentences corpus, German news, year 2015, 100k examples
Sources:
--------
- https://wortschatz.uni-leipzig.de/en/download/german
"""
    with open("data/lpc/deu_news_2015_100K-sentences.txt", "r") as fp:
        # Each line is "<id>\t<sentence>"; keep only the sentence text
        return [s.split("\t")[1].strip() for s in fp.readlines()]
|
4200982c05d6f674280e8c78b8986a4b1a3e1988
| 49,772
|
def count_occurences(grid, number):
"""Count occurrences of number on grid."""
return sum(row.count(number) for row in grid)
|
3980ccdc57f4976c7e60e02e30ab0e4aadcf3e37
| 49,773
|
def flag8(byte, lsb=False) -> list:
"""Split a byte (as a str) into a list of 8 bits, MSB first by default."""
if lsb is True:
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in range(8)]
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in reversed(range(8))]
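# Sketch: 0xB1 = 0b10110001.
print(flag8('B1'))            # [1, 0, 1, 1, 0, 0, 0, 1] (MSB first)
print(flag8('B1', lsb=True))  # [1, 0, 0, 0, 1, 1, 0, 1]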
|
106a6654ccf52b9e77d437080fe06c28b3f26da6
| 49,774
|
def decode_gcs_url(url):
"""Decode GCS URL.
Args:
url (str): GCS URL.
Returns:
tuple: (bucket_name, file_path)
"""
split_url = url.split('/')
bucket_name = split_url[2]
file_path = '/'.join(split_url[3:])
return (bucket_name, file_path)
|
665ed672c03bf0190c981fda78c7f92c835aaa44
| 49,781
|
def generate_sas_url(
account_name: str,
account_domain: str,
container_name: str,
blob_name: str,
sas_token: str
) -> str:
"""
Generates and returns a sas url for accessing blob storage
"""
return f"https://{account_name}.{account_domain}/{container_name}/{blob_name}?{sas_token}"
|
6fedfb9cacc08fddc8e6912bb6cd5c0c2d929e9b
| 49,784
|
def box_enum(typ, val, c):
"""
Fetch an enum member given its native value.
"""
valobj = c.box(typ.dtype, val)
# Call the enum class with the value object
cls_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.instance_class))
return c.pyapi.call_function_objargs(cls_obj, (valobj,))
|
a1ea8d1a52a57874b576afc6a4c45e7d624d409e
| 49,785
|
def equal_to(trial_datum, value) -> bool:
"""Returns True if the trial_datum is equal to a given value."""
return trial_datum == value
|
d028802a7732c4472363a8f71db63505365991f8
| 49,786
|
def convert_words_to_numbers(tokens):
"""
Converts numbers in word format into number format
>>> convert_words_to_numbers(['five', "o'clock"])
['5', "o'clock"]
>>> convert_words_to_numbers(['seven', "o'clock"])
['7', "o'clock"]
"""
number_words = ["zero", "one", "two", "three", "four", "five", "six",
"seven", "eight", "nine", "ten", "eleven", "twelve"]
for index, token in enumerate(tokens):
if token.lower() in number_words:
tokens[index] = str(number_words.index(token.lower()))
return tokens
|
8fbdacf9deb46dfe59beaeee0c4551056c3cbfd7
| 49,789
|
def parseValueCoord(vcstr):
"""
Parse a value*coord ZMAT string.
E.g., "R", "1.0", "2*A", "-2.3*R", "-A"
All whitespace is removed from string before parsing.
If there is no '*' operator, then the string must be
a valid float literal or a coordinate label. A coordinate
label must begin with a letter. A leading '+' or '-' may
be added before a coordinate label (if there is no '*').
If there is a '*' operator, then the string before it must
be a valid float literal. The string after it must be a valid
coordinate label.
Parameters
----------
vcstr : str
Value-coordinate string
Returns
-------
v : float
The literal value or coefficient.
c : str
The coordinate label. This is None if there is no coordinate.
"""
vcstr = "".join(vcstr.split()) # We expect no whitespace. Remove if any.
if "*" in vcstr:
# A two factor value * coordinate string
vstr, cstr = vcstr.split('*')
if len(vstr) == 0 or len(cstr) == 0:
raise ValueError("Malformed value-coordinate string")
elif not cstr[0].isalpha():
raise ValueError("Invalid coordinate label")
else:
return (float(vstr), cstr)
else:
# A bare literal or a bare coordinate, possibly with leading '-' or '+'
if vcstr[0].isalpha():
            return (1.0, vcstr)
elif vcstr[0] == '-' and vcstr[1].isalpha():
return (-1.0, vcstr[1:])
elif vcstr[0] == '+' and vcstr[1].isalpha():
return (+1.0, vcstr[1:])
else:
return (float(vcstr), None)
|
751eb605e5668d9e279f36de988484aeee098190
| 49,792
|
def lambda_handler(event, context):
"""
Expects event in the form:
    [ [I, [O1, O2, ... On-1]], On ]
    Returns:
    [ I, [O1, O2, ... On] ]
"""
results = event[0][1]
results.append(event[1])
return [ event[0][0], results ]
|
ea1fd8ab9d694cc566d944044d22ddf3b1034208
| 49,798
|
def calc_suma_recursiva(n:int=10) -> int:
"""
    Recursively computes the sum of the first n numbers
    Default value: 10
    :param n: how many numbers to sum
    :type n: int
    :return: sum of the first n numbers
    :rtype: int
"""
if n == 0:
return 0
else:
return n + calc_suma_recursiva(n-1)
|
d17b92531270160601981f0c8683a8e324fa8a3d
| 49,800
|
import base64
def encode(encoding_input):
"""This function converts a string to base64, and removes trailing ="""
if (isinstance(encoding_input, str)):
byte = str.encode(encoding_input)
else:
byte = encoding_input
b64 = base64.urlsafe_b64encode(byte)
res = b64.decode('utf-8')
return res.replace('=', '')
|
38110b1d6f6f4570a360cb5d3bf4ac414af9c4e4
| 49,803
|
def fixture_repo_owner() -> str:
"""Return a repository owner."""
return "hackebrot"
|
6970cd459823f1a7fe8a1de64c2d7a52c90e6585
| 49,807
|
def sanitise_p_value(n_iter, p_val):
"""
    Fixes p value too small or == 0 when computed from a distribution
(if not even 1 sample on the other side of measured value)
:param int n_iter: The number of iterations that were performed to compute the distribution
:param float p_val:
:return:
"""
if p_val == 0:
p_val = "< {}".format(1 / n_iter)
else:
p_val = "{:.4f}".format(p_val)
return p_val
|
ff29b7a978b680fbe15fa6a4e7912f22f75af884
| 49,808
|
def split_bytes(code):
"""Split 0xABCD into 0xAB, 0xCD"""
return code >> 8, code & 0xff
|
47a98d31dcee4f9fa73d236a0b78d55586827a68
| 49,810
|
def first_negative(l):
"""
Returns the first negative element in a given list of numbers.
"""
for x in l:
if x < 0:
return x
return None
|
1e62ce772f7d38e3835e5d6635c33bf365f134e7
| 49,812
|
def convert_to_int(word, base=ord('A')):
"""
Converts a word to an integer representation.
    `base` should be ord('A') for uppercase words, ord('a') for lowercase
"""
result = 0
for i,c in enumerate(word):
result |= (ord(c) - base) << (5*i)
return result
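# Sketch: each letter packs into 5 bits, least-significant letter first:
# 'CAB' -> 2 | (0 << 5) | (1 << 10) = 1026.
print(convert_to_int('CAB'))  # 1026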
|
9169a8f3b02d839534c43189156f4d0e03458874
| 49,813
|
def compute_water_ingress_given_damage(damage_index, wind_speed,
water_ingress):
"""
compute percentage of water ingress given damage index and wind speed
Args:
damage_index: float
wind_speed: float
water_ingress: pd.DataFrame
index: damage index
columns: wi
Returns:
prop. of water ingress ranging between 0 and 1
"""
assert 0.0 <= damage_index <= 1.0
# Note that water_ingress index are upper threshold values of DI
idx = water_ingress.index[(water_ingress.index < damage_index).sum()]
return water_ingress.at[idx, 'wi'](wind_speed)
|
9c8a81c080ac4b98e217a2b583e660ed8572dc5d
| 49,815
|
def CheckUrlPatternIndexFormatVersion(input_api, output_api):
""" Checks the kUrlPatternIndexFormatVersion is modified when necessary.
Whenever any of the following files is changed:
- components/url_pattern_index/flat/url_pattern_index.fbs
- components/url_pattern_index/url_pattern_index.cc
and kUrlPatternIndexFormatVersion stays intact, this check returns a
presubmit warning to make sure the value is updated if necessary.
"""
url_pattern_index_files_changed = False
url_pattern_index_version_changed = False
for affected_file in input_api.AffectedFiles():
basename = input_api.basename(affected_file.LocalPath())
if (basename == 'url_pattern_index.fbs' or
basename == 'url_pattern_index.cc'):
url_pattern_index_files_changed = True
if basename == 'url_pattern_index.h':
for (_, line) in affected_file.ChangedContents():
if 'constexpr int kUrlPatternIndexFormatVersion' in line:
url_pattern_index_version_changed = True
break
out = []
if url_pattern_index_files_changed and not url_pattern_index_version_changed:
out.append(output_api.PresubmitPromptWarning(
'Please make sure that url_pattern_index::kUrlPatternIndexFormatVersion'
' is modified if necessary.'))
return out
|
e1a153615ccc6284170d33ba2dfd2ef94985f467
| 49,816
|
def is_jquery_not_defined_error(msg):
"""
Check whether the JavaScript error message is due to jQuery not
being available.
"""
# Firefox: '$ is not defined'
# Chrome: 'unknown error: $ is not defined'
# PhantomJS: JSON with 'Can't find variable: $'
return any(txt in msg for txt in (
"$ is not defined",
"Can't find variable: $",
))
|
2dfa831aecb80269f0f86cf9c4b561ccbff307c3
| 49,822
|
def sign(num):
"""
+1 when positive, -1 when negative and 0 at 0
"""
if num > 0:
return 1
elif num < 0:
return -1
else:
return 0
|
c3ddaf3d7f25e8899df9e5ee087e6820060ca35a
| 49,826
|
def get_all_child_nodes(parent_node):
"""Takes a minidom parent node as input and returns a list of all child nodes """
child_node_list = parent_node.childNodes
return child_node_list
|
aaacd0673e4f08f015ddf119d423191dfd34cdb6
| 49,831
|
def is_easz_conflict(info_list):
"""
Return True/False if info list conflicts
on EASZ resolution function (EOSZ NT sequence).
"""
first_info = info_list[0]
for cur_info in info_list[1:]:
if first_info.easz_nt_seq != cur_info.easz_nt_seq:
return True
return False
|
9fc2b0b8a4df7c958967c6c525eee7d10c0e416c
| 49,833
|
def getPositionAtTime(t):
"""
Simulation time t runs from 0 to 99
First quarter: walking right from 50, 50 to 250, 50
Second quarter: walking down from 250, 50 to 250, 250
Third quarter: walking left from 250, 250 to 50, 250
Fourth quarter: walking up from 50, 250 to 50, 50
"""
if 0 <= t < 25:
return 50 + ((t - 0) * 8), 50, 'right'
elif 25 <= t < 50:
return 250, 50 + ((t - 25) * 8), 'front'
elif 50 <= t < 75:
return 250 - ((t - 50) * 8), 250, 'left'
elif 75 <= t < 100:
return 50, 250 - ((t - 75) * 8), 'back'
|
a9436cc34ae929cb074ae5686ad8a23d2c349c15
| 49,834
|
import torch
def expand_dims_for_broadcast(low_tensor, high_tensor):
"""Expand the dimensions of a lower-rank tensor, so that its rank matches that of a higher-rank tensor.
This makes it possible to perform broadcast operations between low_tensor and high_tensor.
Args:
low_tensor (Tensor): lower-rank Tensor with shape [s_0, ..., s_p]
high_tensor (Tensor): higher-rank Tensor with shape [s_0, ..., s_p, ..., s_n]
Note that the shape of low_tensor must be a prefix of the shape of high_tensor.
Returns:
Tensor: the lower-rank tensor, but with shape expanded to be [s_0, ..., s_p, 1, 1, ..., 1]
"""
low_size, high_size = low_tensor.size(), high_tensor.size()
low_rank, high_rank = len(low_size), len(high_size)
# verify that low_tensor shape is prefix of high_tensor shape
assert low_size == high_size[:low_rank]
new_tensor = low_tensor
for _ in range(high_rank - low_rank):
new_tensor = torch.unsqueeze(new_tensor, len(new_tensor.size()))
return new_tensor
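# Sketch: a (2, 3) tensor gains a trailing singleton dimension so it can
# broadcast against a (2, 3, 4) tensor.
low = torch.zeros(2, 3)
high = torch.zeros(2, 3, 4)
print(expand_dims_for_broadcast(low, high).size())  # torch.Size([2, 3, 1])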
|
b90483cacd7379831514d84f341d994763d90547
| 49,836
|
def ft_generate_center(topics_amount):
"""
Generates a list of lists used to set the center for each layout.
Displays topics in rows of 5.
"""
x = 0
y = 0
center_list = [[x, y]]
for i in range(topics_amount - 1):
if ((i + 1) % 5 == 0):
x = 0
y -= 5
else:
x += 5
center_list.append([x, y])
return center_list
|
65bdba39dd2937122f336d4f8c48774b201446f4
| 49,838
|
import json
def read_jsonl(filename):
"""
Read jsonl file and return output
"""
output = []
with open(filename, 'r') as fp:
for line in fp:
output.append(json.loads(line))
return output
|
6bc3e2b6a19410e84809d1a17aca3c052f613d27
| 49,843
|
def blocks_slice_to_chunk_slice(
blocks_slice: slice, chunk_shape: int, chunk_coord: int
) -> slice:
"""
Converts the supplied blocks slice into chunk slice
:param blocks_slice: The slice of the blocks
:param chunk_shape: The shape of the chunk in this direction
:param chunk_coord: The coordinate of the chunk in this direction
:return: The resulting chunk slice
"""
return slice(
min(max(0, blocks_slice.start - chunk_coord * chunk_shape), chunk_shape),
min(max(0, blocks_slice.stop - chunk_coord * chunk_shape), chunk_shape),
)
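# Sketch: world blocks 10..40 clipped to the 16-block-wide chunk at
# coordinate 1 (world blocks 16..32) give the local slice 0..16.
print(blocks_slice_to_chunk_slice(slice(10, 40), 16, 1))  # slice(0, 16, None)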
|
c23b09cc5d0b65dbc8add0e7a237669a03d0da88
| 49,848
|
def get_attr(model: object, name: str):
"""Get Object Attribute IF Exist"""
if hasattr(model, name):
return getattr(model, name)
return None
|
c1eb619066d72f64889234ae322cdb2767d29654
| 49,850
|
import re
def homogeneize_phone_number(numbers):
"""
Homogeneize the phone numbers, by stripping any space, dash or dot as well
as the international prefix. Assumes it is dealing with French phone
numbers (starting with a zero and having 10 characters).
:param numbers: The phone number string to homogeneize (can contain
multiple phone numbers).
:return: The cleaned phone number. ``None`` if the number is not valid.
"""
if not numbers:
return None
clean_numbers = []
for number in numbers.split(','):
number = number.strip()
number = number.replace(".", "")
number = number.replace(" ", "")
number = number.replace("-", "")
number = number.replace("(", "")
number = number.replace(")", "")
number = re.sub(r'^\+\d\d', "", number)
if not number.startswith("0"):
number = "0" + number
if len(number) == 10:
clean_numbers.append(number)
if not clean_numbers:
return None
return ", ".join(clean_numbers)
|
28d0b67daeb0a06eff80f7d00ac41be4b16590e3
| 49,856
|
def retrieve_all_parameters(parameter_info_dict):
"""Retrieve all parameters from parameter dictionary."""
return sorted({x for v in parameter_info_dict.values() for x in v})
|
4024021bf039168d5f1926e6f602490b3a08ec4d
| 49,857
|
import requests
import json
def get_coordinates_info(lon, lat):
"""
    Request info about (lat, lon) coordinates from the geocode.arcgis API
:param lon: a number for Longitude
:param lat: a number for Latitude
:return: dict if found data else None
"""
path = r"https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/reverseGeocode?" \
"location={}%2C{}&langCode=en&outSR=&forStorage=false&f=pjson".format(lon, lat)
x = requests.get(path)
if x.ok:
return json.loads(x.text)
|
e421909cab65587523197e4e6cbed1afbb37576c
| 49,858
|
def consecutiveSlopes(ys, xs):
"""
    Get slopes of consecutive data points.
    Assumes uniformly spaced xs: the sample period is taken from the
    first two points only.
    """
    slopes = []
    samplePeriod = xs[1] - xs[0]
    for i in range(len(ys)-1):
        slope = (ys[i+1] - ys[i]) / samplePeriod
slopes.append(slope)
return slopes
|
f0c5b107f08d436560c079fbdc15b9cacaca55c1
| 49,861
|
def _user_has_course_access_role(user):
"""
Returns a boolean indicating whether or not the user is known to have at least one course access role.
"""
try:
return user.courseaccessrole_set.exists()
except Exception: # pylint: disable=broad-except
return False
|
231f7a5551a1861ece44c5c85ec96e59d4c7e793
| 49,863
|
import imp
def get_city(source_file):
"""
Given a source file, get the city it refers to.
"""
    # NB: the imp module is deprecated (removed in Python 3.12)
    data_module = imp.load_source('local_data', source_file)
return data_module.city
|
9eef94be0a8cceecde91cd7f1f23e4d1aea6e2dc
| 49,865
|
import inspect
import asyncio
def convert_gen_to_async(gen, delay):
"""Convert a regular generator into an async generator by delaying between items"""
assert inspect.isgenerator(gen)
async def inner_async_gen():
for value in gen:
yield value
await asyncio.sleep(delay)
return inner_async_gen
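# Usage sketch: the wrapper returns an async-generator factory, so call it
# before iterating.
async def _demo():
    agen = convert_gen_to_async((i * i for i in range(3)), delay=0.05)
    async for value in agen():
        print(value)  # 0, 1, 4

asyncio.run(_demo())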
|
94fbe2e84c14bc359f8a8da28a466ff4a6ed4f5a
| 49,869
|
def contains_key_and_has_value(check_dict, key):
"""
    Given a dict and a key, check whether the key exists and has a value
    Returns:
        True: the key exists and has a value
        False: the key may be missing or its value may be empty (None)
"""
if key in check_dict and check_dict[key]:
return True
return False
|
cd4c6ef42813c766689fd0889ed228309edcce6d
| 49,870
|
def get_span_length(spans: list):
"""
    For each spacy.span object in a list, gets the length of the object
:param spans: list of spacy.span objects
:return: list of lengths of the spacy.span objects
"""
return [len(span) for span in spans]
|
8387f08849f089d896105dd685bf2536684e36d6
| 49,874
|
def replace_double_hyphen(value):
"""Replace ``--`` (double) with ``-`` (single).
Args:
value (str): string which have ``--``.
Returns:
str (``--`` is replaced with ``-``.)
Examples:
>>> val = "123--456"
>>> replace_double_hyphen(val)
"123-456"
"""
value = str(value).replace("--", "-")
return value
|
1a9eb18276db8894f0e5152a620e2fea73b5d49d
| 49,878
|
import torch
def make_separable(ker, channels):
"""Transform a single-channel kernel into a multi-channel separable kernel.
Args:
ker (torch.tensor): Single-channel kernel (1, 1, D, H, W).
channels (int): Number of input/output channels.
Returns:
        ker (torch.tensor): Multi-channel group kernel (channels, 1, D, H, W).
"""
ndim = torch.as_tensor(ker.shape).numel()
repetitions = (channels,) + (1,)*(ndim-1)
ker = ker.repeat(repetitions)
return ker
|
80c8ab22f8b39fb5fb91223ed8ecb0c6c3d75a05
| 49,882
|
from typing import Dict
from typing import Any
import pickle
def read_language_file(pickle_filepath: str) -> Dict[Any, Any]:
"""
Read language file.
Parameters
----------
pickle_filepath : str
Returns
-------
language_data : Dict[Any, Any]
Examples
--------
>> from lidtk.utils import make_path_absolute
>> path = make_path_absolute('~/.lidtk/lang/de.pickle')
>> data = read_language_file(path)
>> sorted(list(data.keys()))
['paragraphs', 'used_pages']
"""
with open(pickle_filepath, "rb") as handle:
unserialized_data = pickle.load(handle)
return unserialized_data
|
7bb47a38190d77d350ae0042ca3867447b02f29e
| 49,883
|
def rem_num(num, lis):
""" Removes all instances of a number 'num', from list lis. """
return [ele for ele in lis if ele != num]
|
c2fd18b49a70a01bf9d44da1b2aacf8d4e93cbe9
| 49,887
|
def string_to_int_list(number_string):
"""Convert a string of numbers to a list of integers
Arguments:
number_string -- string containing numbers to convert
"""
int_list = []
for c in number_string:
int_list.append(int(c))
return int_list
|
f53d286dc5a0ac4ad0312d5d071577b9ff93554e
| 49,889
|
def bold_follows(parts, i):
"""Checks if there is a bold (''') in parts after parts[i]. We allow
intervening italics ('')."""
parts = parts[i + 1:]
for p in parts:
if not p.startswith("''"):
continue
if p.startswith("'''"):
return True
return False
|
a51ac103bb00845f7ea547f24da37e239135f519
| 49,895
|
def ensure_bytes(value):
"""Converts value to bytes.
Converts bytearray and str to bytes. Scanners may create
child files that are one of these types, this method is used on
every file object to ensure the file data is always bytes.
Args:
value: Value that needs conversion to bytes.
Returns:
A byte representation of value.
"""
if isinstance(value, bytearray):
return bytes(value)
elif isinstance(value, str):
return value.encode('utf-8')
return value
|
823aed980535b3a940849d7b265e78a65d3cca33
| 49,901
|
import json
def read_json_file(file_to_read: str) -> dict:
"""Read a json file.
    :param file_to_read: path of the json file to read
    :type file_to_read: str
    :rtype: dict
"""
json_results = dict()
with open(file_to_read, 'r') as json_file:
json_results = json.load(json_file)
return json_results
|
c36e5fdacbf3521c5fa57ceefc1923c5e8ef85e9
| 49,905
|
import lzma
def C(x: bytes):
"""
gives the compressed length of a byte string
"""
return len(lzma.compress(x))
|
4180d3f4ae29e245d6f3d0b6bb47a5c067500842
| 49,907
|
def format_mac(str_hex):
"""Accept a string of hexadecimal digits and return a string of MAC address format.
Arguments:
str_hex: a string of hexadecimal digits
"""
    str_mac = ':'.join([str_hex[i:i+2] for i in range(0, 12, 2)])
return str_mac
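# Sketch: 12 hex digits become a colon-separated MAC string.
print(format_mac('0123456789ab'))  # 01:23:45:67:89:ab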
|
9b009984bd09d7ac80db51ae01176bcdcb0014b2
| 49,909
|
import torch
def min_max_norm(x):
"""Min-Max normalization using PyTorch."""
    x_max = torch.max(x)
    x_min = torch.min(x)
    return (x - x_min) / (x_max - x_min)
|
027489c8325a250bcd33481bb61a35338a776324
| 49,911
|
def convert_from_tuple(core_tuple):
"""
Converts arrus core tuple to python tuple.
"""
v = [core_tuple.get(i) for i in range(core_tuple.size())]
return tuple(v)
|
bc4bed173ab28d7209fc101ced8562a14aefb80c
| 49,912
|
def convert_perm(m):
"""
Convert tuple m of non-negative integers to a permutation in
one-line form.
INPUT:
- ``m`` - tuple of non-negative integers with no repetitions
OUTPUT: ``list`` - conversion of ``m`` to a permutation of the set
1,2,...,len(m)
If ``m=(3,7,4)``, then one can view ``m`` as representing the
permutation of the set `(3,4,7)` sending 3 to 3, 4 to 7, and 7 to
4. This function converts ``m`` to the list ``[1,3,2]``, which
represents essentially the same permutation, but of the set
`(1,2,3)`. This list can then be passed to :func:`Permutation
<sage.combinat.permutation.Permutation>`, and its signature can be
computed.
EXAMPLES::
sage: sage.algebras.steenrod.steenrod_algebra_misc.convert_perm((3,7,4))
[1, 3, 2]
sage: sage.algebras.steenrod.steenrod_algebra_misc.convert_perm((5,0,6,3))
[3, 1, 4, 2]
"""
m2 = sorted(m)
return [list(m2).index(x)+1 for x in m]
|
aaf2755985a8e66efdcd06c0e42fdda53481b287
| 49,914
|
def infectious_from_cases(cases, R0) -> float:
"""
Initializes the "infectious" component of a SIR model from the current
number of cases.
This formula assumes a perfect exponential growth.
"""
if R0 <= 1:
raise ValueError(f"R0 must be greater than one (got {R0})")
seed = 1
return (cases * (R0 - 1) + seed) / (2 * R0 - 1)
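# Worked example under the function's exponential-growth assumption:
# 100 cases with R0 = 2 give (100 * (2 - 1) + 1) / (2 * 2 - 1) ~= 33.67.
print(infectious_from_cases(100, R0=2))  # 33.666...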
|
c1be11670b074ef714528db8c32f8f22be8ff716
| 49,917
|
def find_offset(fn:str):
"""\
Finds the offset for the XSPD block. At the moment this just searches
the file for the XSPD header, but in future this will use the offsets
of the other blocks to seek to the correct location.
"""
    BS = 4194304
    with open(fn, "rb") as wad:
        count = 0
        b = wad.read(BS)
        while len(b) > 0:
            if b"XSPD" in b:
                return b.find(b"XSPD") + BS*count
            count += 1
            b = wad.read(BS)  # read the next block so the loop advances
            # NB: a header that straddles a block boundary is still missed
|
8c007b5741e5323f6a1830f2812d8c61aae504b2
| 49,918
|
def is_paired(aln):
"""
Input:
pysam.AlignedSegment
"properly" paired (correct reference and orientation)
NB: This is used instead of the properly_paired flag as rare superamplicons
fall outside the expected insert size distribution and are not marked as such.
NB: It does not check if the orientation is FR as RF orientations are discarded due
to no biological sequence.
Returns:
Boolean
"""
return aln.is_paired and aln.is_reverse != aln.mate_is_reverse
|
8ff169a1bc4a2d30fe7ea0ba92135e773349f952
| 49,919
|
import string
def remove_punctuation(text: str) -> str:
"""Remove punctuation characters from a string.
Args:
text (str): String containing punctuation to be removed.
Returns:
str: String with all punctuation removed.
"""
return text.translate(str.maketrans("", "", string.punctuation))
|
a2968f8da45d992ce1ccd5957a1b0141c35429bc
| 49,921
|
def battery_status(battery_analog_in) -> float:
"""Return the voltage of the battery"""
return (battery_analog_in.value / 65535.0) * 3.3 * 2
|
df3356ca5247767c13c750827917928ef7c3825c
| 49,925
|
def _serializable_load(cls, args, kwargs, state):
"""
Create the new instance using args and kwargs, then apply the additional state. This is used by the __reduce__
implementation.
    :param cls: class to create an instance of
:param args: positional arguments
:param kwargs: keyword arguments
:param state: additional stored state
:return: function call result
"""
obj = cls(*args, **kwargs)
obj._set_state(state)
return obj
|
40ec75f31f8262ce511f1c64f82944803d747817
| 49,927
|
def sortByChanValue(chan):
"""
    Use as the key function in .sort() when you want to sort a list of channels by value
:param chan: channel obj
:return: value
"""
return chan.value
|
0d1e2ac8a8d078aa985236d3c0edea14be49a16b
| 49,936
|
def alwayslist(value):
"""If input value if not a list/tuple type, return it as a single value list."""
if value is None:
return []
if isinstance(value, (list, tuple)):
return value
else:
return [value]
|
b463836d6b647ea81803333e0fdbd7eefb981446
| 49,937
|
from typing import Union
def expo(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Raises given number to given power and returns result"""
expo: Union[int, float] = num1 ** num2
return expo
|
10478539f38f0569342ec4defc6c69ade5b25aff
| 49,940
|
def _spark_calc_op_on_chunks(bucket, data, operators_list):
"""
Calculate operators on chunk of data
return None if no data provided
:param bucket: bucket number
:type bucket: int
:param data: timeseries data
:type data: 2-d array
:param operators_list: list of operators calculated on data
:type operators_list: list
:return:tuple of (bucket number, result dict of calculated operators on chunk - Keys are operators.)
:rtype: tuple (int, dict)
"""
result = {}
# number of points processed
nb_points = len(data)
result['NB_POINTS'] = nb_points
# keep only values
values = data[:, 1]
if values.size:
for operator in operators_list:
if operator == 'MIN':
result['MIN'] = min(values)
if operator == 'MAX':
result['MAX'] = max(values)
if operator == 'AVG' or operator == 'STD':
result['SUM'] = sum(values)
if operator == 'STD':
result['SQR_SUM'] = sum([x ** 2 for x in values])
else:
return None
return bucket, result
|
61ebabc41a4869321d60da89cc1eb8df70045bb9
| 49,942
|
def create_key2list(keys):
"""
    Create a dict whose keys are `keys` and whose values are empty lists.
    Args:
        keys: the values to use as keys (a list)
    return:
        key2list: dict with keys from `keys`, each mapped to an empty list()
"""
key2list = dict()
for i in keys:
key2list[i] = list()
return key2list
|
298c7efe9b8b2ebbc5c06067fcc46dfb34c5ee22
| 49,947
|
def usage(err=''):
""" Prints the Usage() statement for the program """
m = '%s\n' %err
m += ' Default usage is to rebuild the python base code from a wsdl.\n'
m += ' '
m += ' genBase <wsdl path> \n'
m += ' or\n'
m += ' genBase -b <base name> -p <output_path> <wsdl path> \n'
return m
|
ccd9966944e902643bd49fc9563caf1b5a20ff4c
| 49,948
|