content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def _get_QActionGroup(self):
"""
Get checked state of QAction
"""
if self.checkedAction():
return self.actions().index(self.checkedAction())
return None
|
0581a9176c6291714f81b5f6570fac1504153ddb
| 675,103
|
def update_parameters(parameters, grads, learning_rate):
    """
    Apply one gradient-descent step to every layer's weights and biases.

    Arguments:
        parameters -- dict with entries "W1", "b1", ..., "WL", "bL"
        grads -- dict with matching gradients "dW1", "db1", ...
        learning_rate -- step size for the update
    Returns:
        the updated parameters dict
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    for layer in range(1, num_layers + 1):
        parameters["W%d" % layer] = (
            parameters["W%d" % layer] - learning_rate * grads["dW%d" % layer]
        )
        parameters["b%d" % layer] = (
            parameters["b%d" % layer] - learning_rate * grads["db%d" % layer]
        )
    return parameters
|
897a0cd90bcafe146d6ced332bf6d77261d286e3
| 412,214
|
def host_and_page(url):
    """Split a `url` into the hostname and the rest of the url.

    Returns a (host, page) tuple where page always starts with '/'.
    Fixes two defects of the original: a scheme-less URL no longer
    raises IndexError, and a '//' later in the path no longer truncates
    everything after it (split('//') without a maxsplit did that).
    """
    # Drop the scheme ("http:", "https:", ...) when one is present.
    if '//' in url:
        url = url.split('//', 1)[1]
    host, _, page = url.partition('/')
    return host, '/' + page
|
db035aeeaf2c1ae7b9eb00f00daf5673a9551edf
| 100,865
|
import torch
def construct_optimizer(model, cfg):
    """
    Build an SGD, Adam, or AdamW optimizer for *model* from config values.

    References:
        Herbert Robbins, and Sutton Monro. "A stochastic approximation
        method."; Diederik P. Kingma, and Jimmy Ba. "Adam: A Method for
        Stochastic Optimization."

    Args:
        model (model): model whose parameters will be optimized.
        cfg (config): hyper-parameter config; reads cfg.SOLVER.* (base
            learning rate, momentum, weight decay, dampening, etc.).
    Raises:
        NotImplementedError: for any unrecognized OPTIMIZING_METHOD.
    """
    params = model.parameters()
    method = cfg.SOLVER.OPTIMIZING_METHOD
    if method == "sgd":
        return torch.optim.SGD(
            params,
            lr=cfg.SOLVER.BASE_LR,
            momentum=cfg.SOLVER.MOMENTUM,
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
            dampening=cfg.SOLVER.DAMPENING,
            nesterov=cfg.SOLVER.NESTEROV,
        )
    if method == "adam":
        return torch.optim.Adam(
            params,
            lr=cfg.SOLVER.BASE_LR,
            betas=(0.9, 0.999),
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        )
    if method == "adamw":
        return torch.optim.AdamW(
            params,
            lr=cfg.SOLVER.BASE_LR,
            betas=(0.9, 0.999),
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        )
    raise NotImplementedError(
        "Does not support {} optimizer".format(method)
    )
|
63569f73c1d0e5aba9bdd6785d745ddc542c345e
| 494,792
|
def strip_type(caller):
    """
    Strip a trailing '-snp' or '-indel' from a caller name.

    Returns (caller, vartype) where vartype is 'snp', 'indel', or ''.
    If a dash remains after stripping, only the part after the last
    dash is kept.
    """
    vartype = ''
    for suffix in ('snp', 'indel'):
        marker = '-' + suffix
        if caller.endswith(marker):
            caller = caller[:-len(marker)]
            vartype = suffix
            break
    # Keep only the text after the last remaining dash, if any.
    _, dash, tail = caller.rpartition('-')
    if dash:
        caller = tail
    return caller, vartype
|
146a972c9110ce6f39f7cc68cee6d8da889816f0
| 163,814
|
from typing import List
def get_records(project) -> List[dict]:
    """
    Export and return every record stored in *project*.

    :param project: object exposing an ``export_records()`` method
    :return: list of record dictionaries
    """
    return project.export_records()
|
eb8b8c04d86dc536f4aef38b8f10f2c73f3597b5
| 349,490
|
def _tagged2tuples(tagged_dicts):
"""
>>> _tagged2tuples(get_tagged_from_server("who has starred in the movie die hard?"))
[('who', 'O', 'WP'), ('has', 'O', 'VBZ'), ('starred', 'O', 'VBN'), ('in', 'O', 'IN'), ('the', 'O', 'DT'), ('movie', 'O', 'NN'), ('die', 'O', 'VB'), ('hard', 'O', 'RB'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("What was the last queen album?"))
[('What', 'O', 'WP'), ('was', 'O', 'VBD'), ('the', 'O', 'DT'), ('last', 'O', 'JJ'), ('queen', 'O', 'NN'), ('album', 'O', 'NN'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("What was the first queen album?"))
[('What', 'O', 'WP'), ('was', 'O', 'VBD'), ('the', 'O', 'DT'), ('first', 'ORDINAL', 'JJ'), ('queen', 'O', 'NN'), ('album', 'O', 'NN'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("What actors star in the Big Bang Theory?"))
[('What', 'O', 'WDT'), ('actors', 'O', 'NNS'), ('star', 'O', 'NN'), ('in', 'O', 'IN'), ('the', 'O', 'DT'), ('Big', 'O', 'NNP'), ('Bang', 'O', 'NNP'), ('Theory', 'O', 'NNP'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("what actors star in the big bang theory?", caseless=True))
[('what', 'O', 'WDT'), ('actors', 'O', 'NNS'), ('star', 'O', 'NN'), ('in', 'O', 'IN'), ('the', 'O', 'DT'), ('big', 'O', 'JJ'), ('bang', 'O', 'NN'), ('theory', 'O', 'NN'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("who wrote the song hotel california?", caseless=True))
[('who', 'O', 'WP'), ('wrote', 'O', 'VBD'), ('the', 'O', 'DT'), ('song', 'O', 'NN'), ('hotel', 'O', 'NN'), ('california', 'LOCATION', 'NNP'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("who was the president of the united states in 2012?", caseless=True))
[('who', 'O', 'WP'), ('was', 'O', 'VBD'), ('the', 'O', 'DT'), ('president', 'O', 'NN'), ('of', 'O', 'IN'), ('the', 'O', 'DT'), ('united', 'LOCATION', 'NNP'), ('states', 'LOCATION', 'NNPS'), ('in', 'O', 'IN'), ('2012', 'DATE', 'CD'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("who plays megan in the movie taken?", caseless=True))
[('who', 'O', 'WP'), ('plays', 'O', 'VBZ'), ('megan', 'O', 'NNP'), ('in', 'O', 'IN'), ('the', 'O', 'DT'), ('movie', 'O', 'NN'), ('taken', 'O', 'VBN'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("what language do canadians speak?", caseless=True))
[('what', 'O', 'WDT'), ('language', 'O', 'NN'), ('do', 'O', 'VBP'), ('canadians', 'O', 'NNPS'), ('speak', 'O', 'VB'), ('?', 'O', '.')]
>>> _tagged2tuples(get_tagged_from_server("Light explodes over Pep Guardiola's head in Bernabeu press room. Will Mourinho stop at nothing?! Heh heh"))
[('Light', 'O', 'JJ'), ('explodes', 'O', 'VBZ'), ('over', 'O', 'IN'), ('Pep', 'PERSON', 'NNP'), ('Guardiola', 'PERSON', 'NNP'), ("'s", 'O', 'POS'), ('head', 'O', 'NN'), ('in', 'O', 'IN'), ('Bernabeu', 'LOCATION', 'NNP'), ('press', 'O', 'NN'), ('room', 'O', 'NN'), ('.', 'O', '.'), ('Will', 'O', 'MD'), ('Mourinho', 'PERSON', 'NNP'), ('stop', 'O', 'VB'), ('at', 'O', 'IN'), ('nothing', 'O', 'NN'), ('?!', 'NUMBER', 'CD'), ('Heh', 'O', 'NNP'), ('heh', 'O', 'RB')]
"""
tagged = [(t['originalText'], t['ner'], t['pos']) for t in tagged_dicts]
return tagged
|
183c9f48af51d8c5ea82bc47e599acd703ab4672
| 523,077
|
from typing import SupportsFloat
def _format_float(value: SupportsFloat, *, decimals: int = 6) -> str:
"""Format a node position into a string.
This uses the format requested by 2DM: up to nine significant
digits followed by an exponent, e.g. ``0.5 -> 5.0e-01``.
:param value: A object that supports casting to :class:`float`.
:type value: :obj:`typing.SupportsFloat`
:param decimals: The number of decimal places to include, defaults
to ``6``.
:type decimals: :class:`int`, optional
:return: The formatted string with no extra whitespace.
:rtype: :class:`str`
"""
string = f'{" " if float(value) >= 0.0 else ""}{float(value):.{decimals}e}'
return string
|
c1e5f52c1529652a2130c195300d257f66854101
| 166,506
|
import functools
import operator
def product_of_list(iterable):
    """
    Return the product of every element of *iterable*.

    An empty iterable yields the multiplicative identity, 1.
    """
    result = 1
    for factor in iterable:
        result = operator.mul(result, factor)
    return result
|
10d873ab86667f5eab3d2d4957c1218259a0f674
| 558,697
|
def autocrop_array_shapes(input_shapes, cropping):
    """Computes the shapes of the given arrays after auto-cropping is applied.
    For more information on cropping, see the :func:`autocrop` function
    documentation.
    # Arguments
        input_shapes: the shapes of input arrays prior to cropping in
            the form of a list of tuples
        cropping: a list of cropping modes, one for each axis. If length of
            `cropping` is less than the number of axes in the inputs, it is
            padded with `None`. If `cropping` is None, `input_shapes` is
            returned as is. For more information on their values and
            operation, see the :func:`autocrop` documentation.
    # Returns
        shapes of the given arrays after auto-cropping is applied.
    # Raises
        ValueError: if the inputs differ in dimensionality, or an unknown
            crop mode is given.
    """
    if cropping is None:
        return input_shapes
    # Check for consistent number of dimensions
    ndim = len(input_shapes[0])
    if not all(len(sh) == ndim for sh in input_shapes):
        raise ValueError("Not all inputs are of the same "
                         "dimensionality. Got {0} inputs of "
                         "dimensionalities {1}.".format(len(input_shapes), [len(sh) for sh in input_shapes]))
    # Pad the cropping spec with None so it covers every axis.
    # (The original converted `cropping` to a list twice; once suffices.
    # Extra entries beyond ndim are harmlessly ignored by zip below.)
    cropping = list(cropping) + [None] * (ndim - len(cropping))
    result = []
    for sh, cr in zip(zip(*input_shapes), cropping):
        if cr is None:
            # No cropping on this axis: shapes pass through unchanged.
            result.append(sh)
        elif cr in {'lower', 'center', 'upper'}:
            # Every input is cropped to the smallest size on this axis;
            # an unknown (None) size anywhere propagates as None.
            min_sh = None if any(size is None for size in sh) else min(sh)
            result.append([min_sh] * len(sh))
        else:
            raise ValueError('Unknown crop mode \'{0}\''.format(cr))
    return [tuple(sh) for sh in zip(*result)]
|
acf407935814ae9ffb77a8b7ff3dd747be04bb03
| 663,013
|
def rem3(lst):
    """Remove every occurrence of 3 from *lst* in place and return it."""
    # Slice assignment keeps the caller's list object identity while
    # replacing its contents, matching the original's in-place deletes.
    lst[:] = [item for item in lst if item != 3]
    return lst
|
0ae008555d53c48b5eeaecb49ad526d501327af3
| 311,751
|
def alphadump(d, indent=2, depth=0):
    """Dump a dict to a str, one "key: value" entry per line with keys
    in alphabetical order. Nested dicts recurse with *depth* + 1, and
    each line is separated by a newline plus ``depth * indent`` spaces.

    Bug fix: the recursive call previously dropped the *indent*
    argument, so any non-default indent was silently ignored below the
    first level; it is now forwarded. Output with the default indent is
    unchanged.
    """
    sep = "\n" + " " * depth * indent
    pieces = []
    for key in sorted(d.keys()):
        value = d[key]
        if isinstance(value, dict):
            rendered = alphadump(value, indent=indent, depth=depth + 1)
        else:
            rendered = str(value)
        pieces.append("{}: {}{}".format(key, rendered, sep))
    return "".join(pieces)
|
0511fec7a61454f93fbe7b55244b7349c9175baf
| 479,944
|
def problem_9_6(matrix, key):
    """Search a row- and column-sorted M*N matrix for *key*.

    Starts at the top-right corner and walks left/down, discarding one
    row or one column per step (divide and conquer). Returns True when
    *key* is present, False otherwise.
    """
    rows = len(matrix)
    cols = len(matrix[0])
    row, col = 0, cols - 1
    while row < rows and col >= 0:
        current = matrix[row][col]
        if current == key:
            return True
        if key < current:
            col -= 1  # everything below in this column is even larger
        else:
            row += 1  # everything left in this row is even smaller
    return False
|
f9f379aca6650764525b34252802786024a11fb7
| 429,541
|
def _list_at_index_or_none(ls, idx):
"""Return the element of a list at the given index if it exists, return None otherwise.
Args:
ls (list[object]): The target list
idx (int): The target index
Returns:
Union[object,NoneType]: The element at the target index or None
"""
if len(ls) > idx:
return ls[idx]
return None
|
46651d93140b63bcb85b3794848921f8ea42d7bf
| 682,002
|
def compute_precision(golden_standard, mappings):
    """
    Compute the precision of *mappings* against the gold standard.

    :param golden_standard: list of tuples with the correct mappings
    :param mappings: list of tuples produced by the algorithm
    :return: float precision; 0 when *mappings* is empty
    """
    if not mappings:
        return 0
    hit_count = sum(1 for pair in golden_standard if pair in mappings)
    return hit_count / len(mappings)
|
acd76fd9a5442d77273efb1d294b122e57bb9556
| 231,394
|
def rem_var(var, subs):
    """Return a copy of *subs* with any substitution of *var* removed.

    Both the constant and the variable form of *var* are dropped when
    present; the original mapping is left untouched.
    """
    trimmed = subs.copy()
    for key in (var.constant(), var.variable()):
        if key in trimmed:
            del trimmed[key]
    return trimmed
|
161c62470ad648e2d062d021b528ece4e1555509
| 65,962
|
def get_ident_string(module_class):
    """Return a reload-stable identifier string for *module_class*.

    A class object compares unequal to itself after its module is
    reloaded, so compare these dotted "module.ClassName" strings
    instead of the types themselves.
    """
    return '{}.{}'.format(module_class.__module__, module_class.__name__)
|
95d231be9d895340b7e9bc8c325cd284b17f00a6
| 425,790
|
def _exists(index, nx, ny):
"""
Checks whether an index exists an array
:param index: 2D index tuple
:return: true if lower than tuple, false otherwise
"""
return (0 <= index[0] < nx) and (0 <= index[1] < ny)
|
6f7283beec9cbe370648e5b07997ec77b57be9f5
| 423,965
|
def _sum_frts(results:dict):
"""
Get total number of FRTs
Arguments:
results {dict} -- Result set from `get_total_frts`
Returns:
int -- Total number of FRTs in the nation
"""
if results == None:
return 0
total_frts = 0
for result in results:
if result['state_total']:
total_frts += result['state_total']
return total_frts
|
44d56d9e35c2b910ea10e25bb3de7ba9ae0c6294
| 107,152
|
def image_data(image):
    """Get components and bytes for an image.

    Returns (component_count, raw_bytes), where the component count is
    inferred from the byte length divided by the pixel count.
    NOTE(review): no mode check or conversion is attempted — the pixel
    data is used exactly as image.tobytes() provides it.
    """
    raw = image.tobytes()
    pixel_count = image.size[0] * image.size[1]
    return len(raw) // pixel_count, raw
|
f2298efba3ac49ef15c4afa56beb37ed9774af72
| 165,127
|
def divide_list_into_equal_chunks(alist, chunks):
    """
    Split *alist* into consecutive pieces of length *chunks*.

    The final piece may be shorter when len(alist) is not a multiple
    of *chunks*. (Note: *chunks* is the chunk size, not a count.)
    :param alist: list
    :param chunks: int
    :return: list of lists
    """
    result = []
    for start in range(0, len(alist), chunks):
        result.append(alist[start:start + chunks])
    return result
|
543e42256e46fbd1ff56feb4b684b5c18eae7acc
| 224,773
|
def getCasing(word):
    """Classify the letter-casing of *word*.

    Returns one of 'numeric', 'mainly_numeric', 'allLower', 'allUpper',
    'initialUpper', 'contains_digit' or 'other', checked in that order.
    """
    digit_count = sum(1 for ch in word if ch.isdigit())
    digit_fraction = digit_count / float(len(word))
    if word.isdigit():
        return 'numeric'
    if digit_fraction > 0.5:
        return 'mainly_numeric'
    if word.islower():
        return 'allLower'
    if word.isupper():
        return 'allUpper'
    if word[0].isupper():
        # Title-like: initial char upper.
        return 'initialUpper'
    if digit_count > 0:
        return 'contains_digit'
    return 'other'
|
69c48dd1f6943257570ac98a794909d01fd1db0e
| 588,418
|
def _make_sro(object_generator, source_id, rel_type, target_id):
"""
Make an SRO.
:param source_id: The ID of the source object (string)
:param rel_type: The relationship type (string)
:param target_id: The ID of the target object (string)
:return: The SRO, as a dict
"""
rel = object_generator.generate("relationship")
rel["source_ref"] = source_id
rel["target_ref"] = target_id
rel["relationship_type"] = rel_type
return rel
|
a258c0bd17e05a9e8cc979a2061b8c328882e199
| 318,973
|
def length(listed):
    """
    Return the number of items in *listed*, counted by iteration
    (works for any iterable, not just sized containers).
    """
    return sum(1 for _ in listed)
|
6052d82388a16896c74ee3287064095cfc84f787
| 456,765
|
def _calculate_development_risk(module):
"""
Function to calculate Software risk due to the development environment.
This function uses the results of RL-TR-92-52, Worksheet 1B to
determine the relative risk level. The percentage of development
environment characteristics (Dc) applicable to the system under
development determine the risk level.
Baseline (medium) development risk (D) is assigned a 1.
Low development risk (Dc > 0.9) is assigned a 0.5.
High development risk (Dc < 0.5) is assigned a 2.
:param module: the :py:class:`rtk.software.CSCI.Model` or
:py:class:`rtk.software.Unit.Model` data model to calculate.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
module.dc = sum(module.lst_development) / 43.0
if module.dc < 0.5: # High risk
module.d_risk = 2.0
elif module.dc > 0.9: # Low risk
module.d_risk = 0.5
else:
module.d_risk = 1.0
return False
|
f27193929eccd96090944b5c69fb71b5afc3d477
| 190,884
|
def split_data(dataframe):
    """ Split the dataset into features and labels.

    Parameters
    ----------
    dataframe : the full dataset, containing an 'ATT_FLAG' column
    Returns
    -------
    features : ndarray of every column except 'ATT_FLAG'
    labels : 1-D ndarray holding the 'ATT_FLAG' attack-flag column
    """
    labels = dataframe['ATT_FLAG'].to_numpy()
    features = dataframe.drop(columns=['ATT_FLAG']).values
    return features, labels
|
99f982def66318f3ab40de5cb868cce6548f19f8
| 665,585
|
def rotated_array_search(input_list, number):
    """
    Binary-search a sorted array that may have been rotated.

    Args:
        input_list(array), number(int): Input array to search and the target
    Returns:
        int: Index of *number*, or -1 when absent
    """
    lo, hi = 0, len(input_list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        pivot = input_list[mid]
        if pivot == number:
            return mid
        if input_list[lo] <= pivot:
            # The left half [lo..mid] is sorted normally, so a plain
            # range test tells us which side the target is on.
            if input_list[lo] <= number <= pivot:
                hi = mid - 1
            else:
                lo = mid + 1
        elif pivot <= number <= input_list[hi]:
            # Otherwise the right half [mid..hi] must be sorted.
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
|
347c6ed30b580746f7df96e8155a245f5c35c68a
| 146,719
|
def slice_repr(slice_obj):
    """
    Best-guess minimal textual form of *slice_obj*, as one would write
    it inside square brackets ("all" when the slice selects everything).
    """
    start, stop, step = slice_obj.start, slice_obj.stop, slice_obj.step
    if step is not None:
        # Full start:stop:step form; omitted parts render as ''.
        parts = (start, stop, step)
    elif stop is not None:
        parts = (start, stop)
    elif start is None:
        return "all"
    else:
        return repr(start) + ":"
    return ":".join("" if p is None else repr(p) for p in parts)
|
c894f66478ec830a4968d0cfc5d9e146457012b6
| 705,237
|
def time_minutes(dt):
    """Render *dt* as 'HH:MM' (24-hour clock, minute precision)."""
    return '{:%H:%M}'.format(dt)
|
93288a8f0204dd56c59b18b75c62a5c1e059e159
| 670,469
|
def _invoke_codegen_rule_name(plugin, target_name, thrift_src):
"""Returns the name of the rule that invokes the plugin codegen binary."""
return "{}-cpp2-{}-{}".format(target_name, plugin.name, thrift_src)
|
04fe614abbce2049720b7636074dee6cc7b3184b
| 190,641
|
def _get_types(prop):
"""
Returns a property's `type` as a list.
"""
if 'type' not in prop:
return []
if isinstance(prop['type'], str):
return [prop['type']]
return prop['type']
|
b6fbb21a7e1faf6f97d24ccb185e4f6e0229d6a5
| 151,378
|
def hamming(str1, str2):
    """Count positions where the strings differ, ignoring any position
    where either character is 'N' and any overhang past the shorter
    string (zip truncates)."""
    total = 0
    for ch1, ch2 in zip(str1, str2):
        if ch1 != ch2 and ch1 != 'N' and ch2 != 'N':
            total += 1
    return total
|
247bde164b51708a504c9aabbf429daeed62a07e
| 314,005
|
def wrap(x, low=0, high=360):
    """
    Extended modulo function: fold a number into the interval
    [low, high) by repeatedly adding or subtracting the interval span.

    Parameters
    ----------
    x : int or float
        number to be wrapped
    low : int or float, optional
        lowest value of the wrap (default: 0)
    high : int or float, optional
        highest value of the wrap (default: 360)

    Returns
    -------
    x : float
        wrapped number
    """
    span = high - low
    return ((x - low) % span) + low
|
9cc2cd48cee745c8330da06462d00630271818f0
| 356,030
|
def map_value(in_v, in_min, in_max, out_min, out_max):
    """Linearly map *in_v* from the range [in_min, in_max] onto
    [out_min, out_max], clamping the result to the output range."""
    scaled = out_min + (in_v - in_min) * (out_max - out_min) / (in_max - in_min)
    return max(min(out_max, scaled), out_min)
|
f6ccb04d4c5e49e6e228fb4498555204b35b91c6
| 322,791
|
def find_midpoint(low, high):
    """
    Find the midpoint between two numbers. Expects low <= high.
    Args:
        low (int): Low number
        high (int): High number
    Returns:
        (int): midpoint between low and high (rounded down)
    Raises:
        ValueError: if high < low
    """
    if high < low:
        raise ValueError("Expected arg \"low\" to be less than arg \"high\"")
    # Floor division honors the documented int return type; the previous
    # true division produced a float for odd-length spans.
    return low + (high - low) // 2
|
d9cf25f6888eabd7d629e60f04b59c5880814589
| 317,219
|
def extractDigits(key):
    """Split *key* into (non-digit text, integer value of its digits),
    a convenient natural-sort key so "a5" orders before "a15".

    Digits scattered through the string are concatenated in order;
    a digit-free key maps to 0.
    """
    text_chars = []
    digit_chars = []
    for ch in key:
        # Membership in the ASCII digit set, deliberately narrower than
        # str.isdigit() (which also accepts Unicode digit forms).
        target = digit_chars if ch in "0123456789" else text_chars
        target.append(ch)
    digits = "".join(digit_chars)
    return ("".join(text_chars), int(digits) if digits else 0)
|
e2c5acad9dbff04019f2197c1f5c42f5b9a36512
| 177,910
|
def _present_config_results(config_records, resource_ids):
"""
If resource_id is in config_records add to dictionary and return dictionary
:param config_records: config compliance records
:param resource_ids: list of resource ids
:returns: dictionary of resource_id: compliance_type
"""
found_records = {}
for config_records in config_records:
config_record_id = config_records['EvaluationResultIdentifier'][
'EvaluationResultQualifier']['ResourceId']
if config_record_id in resource_ids:
found_records[config_record_id] = config_records["ComplianceType"]
return found_records
|
4e72bd97bc8bbca3e00dd1aa095cee9b5744b0ca
| 241,299
|
import binascii
def to_utf8_unicode_point_string(arg):
    """Given a string, encode it as UTF-8 and return a human-readable
    hex representation prefixed with 'U+'. Example::
        >> to_utf8_unicode_point_string('A')
        U+41
    """
    # bytes.hex() produces the same lowercase hex as binascii.hexlify.
    return 'U+{}'.format(arg.encode('utf8').hex())
|
6b98ccaf4ee4f85a2f54f86f50b1130d4d8a7304
| 239,108
|
def doc_flat_to_nested(key_list, val):
    """ Convert a flat key path plus value into a nested document.
    e.g.:
        ['a', 'b', 'c'], 1  =>  { a: { b: { c: 1 } } }
    Raises on an empty key list.
    """
    if not key_list:
        raise Exception('invalid key_list @%s' % doc_flat_to_nested.__name__)
    head, rest = key_list[0], key_list[1:]
    if rest:
        return {head: doc_flat_to_nested(rest, val)}
    return {head: val}
|
6959a9027b4e901557d6ce3f37e864b3892bb710
| 252,825
|
def normalize_url(url):
    """If passed url doesn't include a scheme, return it with the default
    one - http.

    Fix: the old check (`url.lower().startswith('http')`) wrongly treated
    hosts such as 'httpbin.org' as already having a scheme, and prefixed
    'http://' onto non-http URLs like 'ftp://...'. Testing for '://'
    handles both cases.
    """
    if '://' not in url:
        return 'http://%s' % url
    return url
|
b957056e8ca32f6294c85466b6e52c56bb1dba84
| 693,995
|
import re
from typing import OrderedDict
def get_example_sections(example):
    """Parses a multipart example and returns them in a dictionary by type.

    Sections are delimited by ``<!-- name -->`` comments; keys are the
    section names (languages to highlight, or the special "__doc__"
    section) and values are the stripped section bodies.
    """
    pieces = re.split(r'<!-- (.*) -->', example)
    # re.split with one capture group yields
    # [leading_text, name1, body1, name2, body2, ...]; drop the lead-in.
    pieces = pieces[1:]
    sections = OrderedDict()
    pair_iter = iter(pieces)
    for name, body in zip(pair_iter, pair_iter):
        sections[name] = body.strip()
    return sections
|
a28ede469e82755528013a3b1cc08a56ed024577
| 663,834
|
from typing import Callable
def has_non(function: Callable) -> Callable:
    """
    Build the logical "converse" of a per-character validator.

    Given *function* (same signature as the other validators in this
    section), the returned callable yields True when at least one
    character of its string argument fails *function*. For example,
    `has_non(is_greek)` returns True for any string containing a
    non-Greek character. Non-string input raises ValueError.

    Note the produced function may be quite inefficient; it exists as a
    convenience for whipping out converse forms of validators.
    """
    def converse(string: str) -> bool:
        if not isinstance(string, str):
            raise ValueError("invalid input - not a string")
        return any(not function(ch) for ch in string)
    return converse
|
f95c140bb7d295b8557dc9d7ddb3e42b1bf74ebf
| 71,190
|
import json
def _get_metadata(dataset):
"""Get the layer metadata if exist"""
metadata = [k for k in dataset['keywords']
if k.startswith('layer_info:')]
if not metadata:
return ""
else:
return json.loads(metadata[0].split("layer_info:")[1])
|
9c8a0269e9c841caeb299b4b489e8531ee9c5d0a
| 489,354
|
def pop_required_arg(arglist, previousArg):
    """
    Pop the first element off the list and return it.

    If the list is empty, raise an Exception naming the argument that
    needed a parameter after it. (Ported from Python 2: the original
    `raise Exception, msg` statement is a SyntaxError on Python 3.)
    """
    if not arglist:
        raise Exception("Missing required parameter after %s" % previousArg)
    return arglist.pop(0)
|
ed35be7859326979dceb5843a07769e5a022a30b
| 56,157
|
import math
def get_grid_size(k: int) -> int:
    """
    returns the grid size (total number of elements for a
    cgr of k length kmers), i.e. sqrt(4 ** k) == 2 ** k
    :param: k int -- the value of k to be used
    :return: int -- the total number of elements in the grid
    """
    # Exact integer arithmetic: the previous int(math.sqrt(4 ** k))
    # round-trips through a float and loses precision once 4 ** k
    # exceeds 2 ** 53.
    return 2 ** k
|
2a770d0be8093a16f7c795fb6b8dfeccd8c8e140
| 568,752
|
from typing import List
def hello_world(cities: List[str] = None) -> bool:
    """
    Hello world function.
    Arguments:
        - cities: List of cities in which 'hello world' is posted.
          Defaults to ["Berlin", "Paris"].
    Return:
        - success: Whether or not function completed successfully.
    """
    # None sentinel instead of a mutable default argument (the old
    # default list was shared across calls).
    if cities is None:
        cities = ["Berlin", "Paris"]
    try:
        # Plain loop: a list comprehension run only for its side effects
        # built and discarded a throwaway list.
        for city in cities:
            print("Hello {}!".format(city))
        return True
    except KeyboardInterrupt:
        return False
|
a24f0f47c9b44c97f46524d354fff0ed9a735fe3
| 706,644
|
def get_section_from_chunk(chunk, sectionname):
    """Extract a named section of a chunk.

    A section starts on the line equal to *sectionname* and runs until
    the first empty line (or the end of the chunk). Returns [] when the
    section name never appears.
    """
    section = []
    lines = iter(chunk)
    # Skip forward to the section header.
    for line in lines:
        if line == sectionname:
            break
    else:
        return section
    # Collect lines until the blank terminator.
    for line in lines:
        if line == "":
            break
        section.append(line)
    return section
|
d2e40c599545f5c770a50f260ce0ac858814a80e
| 72,187
|
import importlib
def import_modules(*modules):
    """This function imports and returns one or more modules to utilize in a unit test.
    .. versionadded:: 2.7.4
    :param modules: One or more module paths (absolute) in string format
    :returns: The imported module(s): a single module object when one
        path is given, otherwise a list of module objects
    """
    imported_modules = [importlib.import_module(module) for module in modules]
    # (Removed a stray no-op `tuple(imported_modules)` whose result was
    # discarded; the docstring now states the actual list return type.)
    return imported_modules if len(imported_modules) > 1 else imported_modules[0]
|
9b347215b95aa20dbb21b20fcfc926abac705558
| 213,142
|
import torch
import math
def nanmean(values: torch.Tensor) -> torch.Tensor:
    """
    Computes the average of all values in the tensor, skipping those entries that are NaN (not a number).
    If all values are NaN, the result is also NaN.
    :param values: The values to average (any shape).
    :return: A scalar tensor containing the average.
    """
    # Flatten before masking: the original built the mask from a
    # flattened view but indexed the un-flattened tensor, which fails
    # for anything but 1-D input.
    flat = values.reshape(-1)
    valid = flat[~torch.isnan(flat)]
    if valid.numel() == 0:
        return torch.tensor([math.nan]).type_as(values)
    return valid.mean()
|
424ffbb9a13d71c76d982f951c7691e700ccdd18
| 388,262
|
def pedir_entero(msj):
    """
    Prompt with input(msj) repeatedly until the user enters a valid
    integer, then return it. Invalid entries print an error message
    (in Spanish) and re-prompt.
    """
    while True:
        raw = input(msj)
        try:
            return int(raw)
        except ValueError:
            print('El valor ingresado no es válido.')
|
ee892b3b129916bb917d825c22975630b37cb414
| 493,979
|
import posixpath
def _join_posixpaths_and_append_absolute_suffixes(prefix_path, suffix_path):
"""
Joins the POSIX path `prefix_path` with the POSIX path `suffix_path`. Unlike posixpath.join(),
if `suffix_path` is an absolute path, it is appended to prefix_path.
>>> result1 = _join_posixpaths_and_append_absolute_suffixes("relpath1", "relpath2")
>>> assert result1 == "relpath1/relpath2"
>>> result2 = _join_posixpaths_and_append_absolute_suffixes("relpath", "/absolutepath")
>>> assert result2 == "relpath/absolutepath"
>>> result3 = _join_posixpaths_and_append_absolute_suffixes("/absolutepath", "relpath")
>>> assert result3 == "/absolutepath/relpath"
>>> result4 = _join_posixpaths_and_append_absolute_suffixes("/absolutepath1", "/absolutepath2")
>>> assert result4 == "/absolutepath1/absolutepath2"
"""
if len(prefix_path) == 0:
return suffix_path
# If the specified prefix path is non-empty, we must relativize the suffix path by removing
# the leading slash, if present. Otherwise, posixpath.join() would omit the prefix from the
# joined path
suffix_path = suffix_path.lstrip(posixpath.sep)
return posixpath.join(prefix_path, suffix_path)
|
5ed573c7707f0a8e64a3f606597aafc7489e0edb
| 250,401
|
from string import ascii_lowercase
def decimalToAlphabetical(index):
    """
    Convert a zero-based int to a spreadsheet-style alphabetical index:
    0 -> 'a', 1 -> 'b', ..., 25 -> 'z', 26 -> 'aa', 'yama' <- 440414.
    :param index: int (>= 0)
    :return: str
    """
    assert isinstance(index, int) and index >= 0
    letters = []
    index += 1  # the alphabet has no zero digit; shift to 1-based
    while index:
        index -= 1  # bijective base-26: 'z' rolls over to 'aa', not 'ba'
        index, remainder = divmod(index, 26)
        letters.append(ascii_lowercase[remainder])
    return ''.join(reversed(letters))
|
c10d5e281cbc29ea6d835d9223cfacaa7d4ee9d6
| 418,699
|
import re
def parse_show_snmp_community(raw_result):
    """
    Parse the 'show snmp community' command raw output.
    :param str raw_result: vtysh raw result string.
    :rtype: list
    :return: list of community-name strings, or None when the output
        contained no community entries
    ::
        [
            'public',
            'private',
            'community1',
            'community2'
        ]
    """
    separators_seen = 0
    result = []
    for line in raw_result.splitlines():
        if separators_seen == 2:
            # Everything after the second dashed separator line is a
            # community name.
            result.append(line.strip())
        elif re.match(r'\s*-+\s*', line):
            separators_seen += 1
    # BUG FIX: the original tested `result == {}` — a list compared to a
    # dict is always False, so the documented None-on-empty return could
    # never trigger.
    if not result:
        return None
    return result
|
ce962b9030135153fd458440b2232a5546a9345f
| 192,712
|
import yaml
def generate_rke_yaml(ips, user, ssh_private_key):
    """
    Make the YAML file that RKE is going to use to create the kubernetes cluster
    :param ips: What IP addresses are we working with? Array of strings
    :param user: What user should we create?
    :param ssh_private_key: Private key text (utf-8) to include in yaml
    :return: string with the formatted yaml in it

    Role assignment, as implemented below:
      1. 'controlplane' and 'etcd' are dealt round-robin over the nodes;
      2. any node left without a role becomes a 'worker';
      3. if that produced no worker, non-controlplane nodes are drafted,
         and failing that node 0 is forced to be a worker;
      4. etcd membership is topped up towards 3 members from nodes that
         are neither etcd nor controlplane.
    """
    # Build one node entry per IP; roles are filled in below.
    nodes = []
    for ip in ips:
        nodes.append(
            {
                'address': ip,
                'user': user,
                'ssh_key': ssh_private_key,
                'role': [],
            },
        )
    # Step 1: spread controlplane/etcd round-robin (with a single node,
    # both land on it).
    for i, role in enumerate(['controlplane', 'etcd']):
        node_idx = i % len(nodes)
        nodes[node_idx]['role'].append(role)
    # Step 2: every role-less node defaults to worker.
    for node in nodes:
        if len(node['role']) == 0:
            node['role'].append('worker')
    worker_count = 0
    for node in nodes:
        if 'worker' in node['role']:
            worker_count += 1
    # Step 3: guarantee at least one worker, preferring nodes that do
    # not run the control plane.
    if worker_count == 0:
        for node in nodes:
            if 'controlplane' not in node['role']:
                node['role'].append('worker')
                worker_count += 1
    if worker_count == 0:
        # Last resort: double up roles on the first node.
        nodes[0]['role'].append('worker')
    etcd_count = 0
    for node in nodes:
        if 'etcd' in node['role']:
            etcd_count += 1
    # Step 4: grow the etcd quorum towards 3 members where possible.
    if etcd_count < 3:
        for node in nodes:
            if 'etcd' not in node['role'] and 'controlplane' not in node['role']:
                node['role'].append('etcd')
                etcd_count += 1
                if etcd_count >= 3:
                    break
    # Pinned service images for the cluster components.
    services = {
        'etcd': {'image': 'quay.io/coreos/etcd:latest'},
        'kube-api': {'image': 'rancher/k8s:v1.11.6-rancher2'},
        'kube-controller': {'image': 'rancher/k8s:v1.11.6-rancher2'},
        'scheduler': {'image': 'rancher/k8s:v1.11.6-rancher2'},
        'kubelet': {'image': 'rancher/k8s:v1.11.6-rancher2'},
        'kubeproxy': {'image': 'rancher/k8s:v1.11.6-rancher2'},
    }
    # Addon manifests: an admin service account plus its cluster-admin
    # role binding, embedded as a multi-document YAML string.
    addons = [
        {
            "apiVersion": "v1",
            "kind": "ServiceAccount",
            "metadata": {
                "name": "cloudbolt-admin",
                "namespace": "kube-system"
            }
        },
        {
            "apiVersion": "rbac.authorization.k8s.io/v1",
            "kind": "ClusterRoleBinding",
            "metadata": {
                "name": "cloudbolt-admin"
            },
            "roleRef": {
                "apiGroup": "rbac.authorization.k8s.io",
                "kind": "ClusterRole",
                "name": "cluster-admin"
            },
            "subjects": [
                {
                    "kind": "ServiceAccount",
                    "name": "cloudbolt-admin",
                    "namespace": "kube-system"
                }
            ]
        },
    ]
    document = {
        'nodes': nodes,
        'services': services,
        # The leading '---' keeps the addons a valid YAML document list.
        'addons': '---\n' + yaml.dump_all(addons),
    }
    return yaml.dump(document)
|
889e3d73420ee4eb0c5d7c84531e37127c454368
| 366,456
|
import csv
def parse_ngram_file(csv_file):
    """Parse a single ngram wordlist into a hashtable.

    Each CSV row's first column is split on whitespace into a tuple of
    words; the returned dict maps every such tuple to True.
    """
    table = {}
    with open(csv_file, newline='') as handle:
        for row in csv.reader(handle):
            table[tuple(row[0].split())] = True
    return table
|
eec50a6a88104cec27809bf938a12c862d7deba2
| 383,077
|
def _process_init_command(args, _model):
"""Handle the init command: create an empty password database."""
assert args.command == 'init'
# Keep the model empty and let the main() function write out the database.
return 0
|
684d6f8c68bceb79dd1c18a934a4009762bd238d
| 162,911
|
from typing import List
from typing import Dict
from typing import Any
def format_sents_for_output(sents: List[str], doc_id: str) -> Dict[str, Dict[str, Any]]:
    """
    Key each sentence by "<doc_id>_sent_<i>" and pair it with an empty
    label list:
    {
        "sent_id": {"text": "sentence text", "label": []}
    }
    """
    return {
        f"{doc_id}_sent_{position}": {"text": sentence, "label": []}
        for position, sentence in enumerate(sents)
    }
|
d6178ac48da4d95e8d3727ca9220168e06ba223e
| 8,177
|
def _get_fuzzer_module_name(fuzzer: str) -> str:
"""Returns the name of the fuzzer.py module of |fuzzer|. Assumes |fuzzer| is
an underlying fuzzer."""
return 'fuzzers.{}.fuzzer'.format(fuzzer)
|
7fcebf65168d6dc4e4f70a941fe6b0e30cb5790c
| 218,469
|
def filter_matching_taxon_ids(query_set, taxon_id=None):
    """Restrict a query to rows matching a UniProt taxonomy id.

    Parameters
    ----------
    query_set : :class:`Query`
        A `sqlalchemy` query instance.
    taxon_id : int, optional
        An integer taxonomy id supported by `UniProt`. When None, the
        query is returned unchanged.

    Returns
    -------
    :class:`Query`
        The (possibly filtered) query instance.
    """
    if taxon_id is None:
        return query_set
    return query_set.filter_by(taxon_id=taxon_id)
|
55cae9addfe1db488a1cbb26c9c9408dfd86780e
| 132,941
|
def Top1Accuracy(predictions, recognition_solution):
  """Computes top-1 accuracy for recognition prediction.

  Note that test images without ground-truth are ignored.

  Args:
    predictions: Dict mapping test image ID to a dict with keys 'class'
      (integer) and 'score' (float).
    recognition_solution: Dict mapping test image ID to list of ground-truth
      landmark IDs.

  Returns:
    accuracy: Top-1 accuracy (float). Returns 0.0 when the solution contains
      no image with ground-truth, instead of raising ZeroDivisionError.
  """
  # Loop over test images in solution. If it has at least one class label, we
  # check if the prediction is correct.
  num_correct_predictions = 0
  num_test_images_with_ground_truth = 0
  for key, ground_truth in recognition_solution.items():
    if not ground_truth:
      continue  # Images without ground-truth are ignored.
    num_test_images_with_ground_truth += 1
    if key in predictions and predictions[key]['class'] in ground_truth:
      num_correct_predictions += 1
  if num_test_images_with_ground_truth == 0:
    # Guard: the original divided by zero when no image had ground truth.
    return 0.0
  return num_correct_predictions / num_test_images_with_ground_truth
|
456344b5fcf12e1523cb7dc2f4f5d0064e596db4
| 498,801
|
def loop_noise_coupling_functions(olg, clp, chp, lock='active'):
    """Control-loop coupling functions for an opto-mechanical cavity.

    Inputs:
    -------
    olg: array of complex floats
        Open loop gain
    clp: array of complex floats
        Cavity low pass
    chp: array of complex floats
        Cavity high pass
    lock: string
        "active" locks the laser to the cavity; "passive" locks the
        cavity to the laser.

    Outputs:
    --------
    a_in: array of complex floats
        Input frequency noise coupling to transmitted frequency noise
    a_sense: array of complex floats
        Sensing frequency noise coupling to transmitted frequency noise
    a_disp: array of complex floats
        Displacement frequency noise coupling to transmitted frequency noise
    """
    # All three couplings share the same closed-loop denominator.
    denom = 1 + clp * olg
    if lock == 'active':
        a_in = clp / denom
        a_sense = clp * olg / denom
        a_disp = clp**2 * olg / denom + chp
    else:
        a_in = clp * (1 + olg) / denom
        a_sense = chp * olg / denom
        a_disp = chp / denom
    return a_in, a_sense, a_disp
|
1ff646772254fe07435493f98c76570d303fc670
| 122,564
|
def size_table_name(model_selector):
    """
    Canonical name of the injected destination desired_size table.

    Parameters
    ----------
    model_selector : str
        e.g. school or workplace

    Returns
    -------
    table_name : str
    """
    return "{}_destination_size".format(model_selector)
|
5309738dd0b59c745c88e30e64f301d0b2cbf949
| 563,932
|
import re
def strings(filename, minChars=4):
    """Search printable strings in a binary file.

    :param filename: The file to be read
    :type filename: str
    :param minChars: Minimum run length of printable characters *(default 4)*
    :type minChars: int
    :returns: All printable strings found, joined with newlines; an empty
        string if the file does not exist.
    :rtype: str
    """
    # Runs of at least `minChars` printable ASCII characters (space..tilde).
    regex_string = '[ -~]{' + str(minChars) + ',}'
    try:
        with open(filename, errors='ignore') as f:
            return '\n'.join(re.findall(regex_string, f.read()))
    except FileNotFoundError:
        return ''
|
caacb1385fa2676b24d38b9b749cdd6a5a497599
| 597,725
|
from random import gauss
def random_mbh(type='agn'):
    """Draw a random black-hole mass (in solar masses).

    ``type`` selects the population: ``'agn'`` or ``'xrb'``.
    """
    if type == 'agn':
        # log10(M_bh) ~ N(7.83, 0.63) (jin12)
        log_mass = gauss(7.83, 0.63)
    elif type == 'xrb':
        log_mass = gauss(1.1, 0.15)
    else:
        raise Exception('type must be agn or xrb')
    return 10 ** log_mass
|
58a1b638fc1acf1a0bf90a25bc3f929f92f338b7
| 112,279
|
def second_valid_range_str(second_valid_range):
    """
    Fixture yielding a string representation of a range within the bounds
    of the "second" field.
    """
    first = second_valid_range[0]
    last = second_valid_range[-1]
    return f'{first}-{last}'
|
fc58fd7191289986d50a5302565a3c000f9154b6
| 331,974
|
def set_time_resolution(datetime_obj, resolution):
    """Truncate a python datetime object to the given resolution.

    Args:
        datetime_obj: A python datetime object.
        resolution: One of "year", "month", "day", "hour", "minute",
            "second" or "millisecond".

    Returns:
        A datetime object truncated to *resolution*.

    Examples:
        .. code-block:: python

            from typhon.utils.time import set_time_resolution, to_datetime
            dt = to_datetime("2017-12-04 12:00:00")
            # datetime.datetime(2017, 12, 4, 12, 0)
            new_dt = set_time_resolution(dt, "day")
            # datetime.datetime(2017, 12, 4, 0, 0)
            new_dt = set_time_resolution(dt, "month")
            # datetime.datetime(2017, 12, 1, 0, 0)
    """
    if resolution == "millisecond":
        # Keep whole milliseconds only.
        return datetime_obj.replace(
            microsecond=int(datetime_obj.microsecond / 1000) * 1000
        )
    # For each resolution, the fields to reset (coarsest to finest).
    reset_fields = {
        "year": dict(month=1, day=1, hour=0, minute=0, second=0, microsecond=0),
        "month": dict(day=1, hour=0, minute=0, second=0, microsecond=0),
        "day": dict(hour=0, minute=0, second=0, microsecond=0),
        "hour": dict(minute=0, second=0, microsecond=0),
        "minute": dict(second=0, microsecond=0),
        "second": dict(microsecond=0),
    }
    if resolution not in reset_fields:
        raise ValueError("Cannot set resolution to '%s'!" % resolution)
    return datetime_obj.replace(**reset_fields[resolution])
|
449d5ac691ea04ce2a19fb825743d081add5990c
| 101,120
|
def handle_title(block):
    """
    Extract information from a title block (from a README doctree).

    Args:
        block: A title doctree block

    Returns: A dictionary with keys 'type' and 'text'; the title text gets
        a tilde underline appended on the next line.
    """
    underlined = '\n'.join((block.astext(), '~' * 20))
    return {'type': 'title', 'text': underlined}
|
671a76fbd6757d6036c16d94dd40e1729ecb6025
| 469,182
|
def generate_test_description(local_symbols, *variable_names):
    """
    Generate a human-readable test description from variable names.

    :param local_symbols: local symbol table (e.g. ``locals()``) from where
        the function was called; names are evaluated against it
    :param variable_names: names (or expressions) to render as
        ``name = value`` pairs
    :return: test description string of the form
        ``when testing 'a = 1, b = 2'``
    """
    # NOTE(review): eval() is used deliberately so callers can pass simple
    # expressions, not just bare names. Only ever call this with trusted,
    # test-authored strings — never with external input.
    variables_text = ', '.join('{} = {}'.format(variable_name, eval(variable_name, local_symbols))
                               for variable_name in variable_names)
    return 'when testing \'{}\''.format(variables_text)
|
4eddea7075994cc8e3d9e5da4bdb4bf8c85c8aad
| 64,897
|
def two_pts_to_line(pt1, pt2):
    """
    Build the line through two points in the form a1*x + a2*y = b.

    A vertical segment (zero run) is approximated with a very steep slope
    instead of raising ZeroDivisionError.
    """
    x1, y1 = (float(c) for c in pt1)
    x2, y2 = (float(c) for c in pt2)
    rise = y2 - y1
    run = x2 - x1
    if run == 0:
        slope = 1e5 * rise
    else:
        slope = rise / run
    return -slope, 1., -slope * x1 + y1
|
d607008c41eaa052c0988a7ac66588b464aab8e0
| 701,271
|
def getKeyByValue(dictOfElements, valueToFind):
    """Return the first key mapped to the given value, or None if absent."""
    return next(
        (key for key, value in dictOfElements.items() if value == valueToFind),
        None,
    )
|
b568ab7d6e901d548a1295ef8885c67e698a2cf0
| 308,639
|
def get_resultant(X, Y, Z):
    """Return the resultant magnitudes of three equal-length sequences of
    vector components X, Y and Z."""
    return [
        (X[i] ** 2 + Y[i] ** 2 + Z[i] ** 2) ** 0.5
        for i in range(len(X))
    ]
|
494f819008a86a1a60e52f2f1bb9b5ac011b0508
| 598,773
|
def gillespie (r, *args, **kwargs):
    """
    Run a Gillespie stochastic simulation.

    Temporarily switches the model's integrator to 'gillespie', runs the
    simulation, and always restores the previous integrator afterwards.

    Examples:

        rr = te.loada ('S1 -> S2; k1*S1; k1 = 0.1; S1 = 40')

        # Simulate from time zero to 40 time units
        result = rr.gillespie (0, 40)

        # Simulate on a grid with 10 points from start 0 to end time 40
        result = rr.gillespie (0, 40, 10)

        # Simulate from time zero to 40 time units using the given selection list
        # This means that the first column will be time and the second column species S1
        result = rr.gillespie (0, 40, ['time', 'S1'])

        # Simulate from time zero to 40 time units, on a grid with 20 points
        # using the give selection list
        result = rr.gillespie (0, 40, 20, ['time', 'S1'])
    """
    if r.integrator is None:
        raise ValueError("model is not loaded")
    prev = r.integrator.getName()
    # kwargs is always a dict here (possibly empty), so just set the key;
    # the old `if kwargs is not None` check was dead code.
    kwargs['integrator'] = 'gillespie'
    try:
        result = r.simulate(*args, **kwargs)
    finally:
        # Restore the previous integrator even if the simulation fails.
        r.setIntegrator(prev)
    return result
|
420c1c5b2d97ded73dc077cad30f6fa94a4e5c79
| 110,257
|
def substract(v1, v2):
    """
    Return the component-wise difference v2 - v1 of two 2-D vectors.
    """
    dx = v2[0] - v1[0]
    dy = v2[1] - v1[1]
    return (dx, dy)
|
39a99b79dbe6e9ee638ed7c001ec84119e70160b
| 177,852
|
import torch
def get_loss_cumu(loss_dict, cumu_mode):
    """Combine different losses into a single scalar loss.

    Args:
        loss_dict: A dictionary, list or tensor of loss values, each of
            which is a torch scalar.
        cumu_mode: a 2-tuple or string. Choose from:
            ("generalized-mean"/"gm", {order}): generalized mean with order
            "gm-{order}": shorthand string for the above, e.g. "gm-2"
            "harmonic": harmonic mean
            "geometric": geometric mean
            "mean": arithmetic mean
            "sum": summation
            "min": minimum
            "original": returns the original loss_dict.

    Returns:
        loss: the combined loss scalar computed according to cumu_mode.
    """
    if cumu_mode == "original":
        return loss_dict
    if isinstance(loss_dict, dict):
        loss_list = torch.stack(list(loss_dict.values()))
    elif isinstance(loss_dict, list):
        loss_list = torch.stack(loss_dict)
    elif isinstance(loss_dict, torch.Tensor):
        loss_list = loss_dict
    else:
        # The original used a bare `raise` (RuntimeError); be explicit.
        raise TypeError(
            "loss_dict must be a dict, list or torch.Tensor, got {}".format(
                type(loss_dict)))
    N = len(loss_list)
    if N == 1:
        return loss_list[0]
    epsilon = 1e-20  # to prevent NaN
    # Accept the "gm-{order}" shorthand; guard with isinstance so the
    # documented tuple form ("gm", order) no longer crashes on .startswith.
    if isinstance(cumu_mode, str) and cumu_mode.startswith("gm-"):
        cumu_mode_str, num = cumu_mode.split("-")
        cumu_mode = (cumu_mode_str, float(num))  # float(), not eval()
    if isinstance(cumu_mode, tuple) and cumu_mode[0] in ["generalized-mean", "gm"]:
        # Special orders reduce to the named means.
        if cumu_mode[1] == -1:
            cumu_mode = "harmonic"
        elif cumu_mode[1] == 0:
            cumu_mode = "geometric"
        elif cumu_mode[1] == 1:
            cumu_mode = "mean"
    if cumu_mode == "harmonic":
        loss = N / (1 / (loss_list + epsilon)).sum()
    elif cumu_mode == "geometric":
        loss = (loss_list + epsilon).prod() ** (1 / float(N))
    elif cumu_mode == "mean":
        loss = loss_list.mean()
    elif cumu_mode == "sum":
        loss = loss_list.sum()
    elif cumu_mode == "min":
        loss = loss_list.min()
    elif isinstance(cumu_mode, tuple) and cumu_mode[0] in ["generalized-mean", "gm"]:
        order = cumu_mode[1]
        loss = (((loss_list + epsilon) ** order).mean()) ** (1 / float(order))
    else:
        raise ValueError("Unknown cumu_mode: {}".format(cumu_mode))
    return loss
|
27fe5ac266cde59333c5c7f0c40da7ae9f483abe
| 359,166
|
def make_progress_bar_text(percentage: float, bar_length: int = 2) -> str:
    """
    Build the letter-encoded progress bar used by seasonal challenges,
    catalysts and more.

    Translations:
        "A" -> Empty Emoji
        "B" -> Empty Emoji with edge
        "C" -> 1 Quarter Full Emoji
        "D" -> 2 Quarter Full Emoji
        "E" -> 3 Quarter Full Emoji
        "F" -> 4 Quarter Full Emoji
    """
    quarter = 1 / bar_length / 4
    # Ordered from fullest to emptiest so the first matching level wins.
    fill_levels = (("F", 4), ("E", 3), ("D", 2), ("C", 1))
    bar_text = ""
    for _ in range(bar_length):
        for letter, quarters in fill_levels:
            threshold = quarter * quarters
            if percentage >= threshold:
                bar_text += letter
                percentage -= threshold
                break
        else:
            # Empty segment: plain empty unless the previous segment was
            # completely full, in which case keep the tiny edge.
            if not bar_text or bar_text[-1] != "F":
                bar_text += "A"
            else:
                bar_text += "B"
    return bar_text
|
6422dfbb7f4bb9b875491398fab57e9f8af5a380
| 677,345
|
def _is_kind(cell, kinds:str) -> bool:
"""Internal function: Check if cell is of one of the given kinds."""
if kinds == 'all' or cell.cell_type in kinds:
return True
if 'jollity' in cell.metadata:
return f'md:{cell.metadata.jollity.kind}' in kinds
return False
|
aa0c41fba6c8bf4f847dfc229e31808fd9482b14
| 262,760
|
import re
def clean_license_name(license_name):
    """Strip the trailing word ``license`` from a license name.

    :param str license_name: the license name
    :return str: the name without the word ``license``
    """
    cleaned, _count = re.subn(r'(.*)\s+license', r'\1', license_name,
                              flags=re.IGNORECASE)
    return cleaned
|
970d933911b69ba9a1f33a768bc68032334d41c3
| 15,213
|
def short_time(time):
    """
    More readable time string: drop the last four characters
    (the sub-second digits) of str(time).
    """
    return str(time)[:-4]
|
43cda011a592b49a525c2ddfa2786a391871331a
| 160,094
|
import torch
def load_model(model, model_path, optimizer_path=None):
    """
    Load a model's state_dict from disk into *model*; when an optimizer
    path is given, also load and return the optimizer object.
    """
    state = torch.load(model_path)
    model.load_state_dict(state)
    if not optimizer_path:
        return model
    optimizer = torch.load(optimizer_path)
    return model, optimizer
|
f91da26579309114a459be9d9c93d84f25b88432
| 140,121
|
import errno
def read_file(path):
    """Return the contents of *path*, or None if the file is missing."""
    try:
        with open(path, 'r') as handle:
            return handle.read()
    except IOError as exc:
        # Only a missing file maps to None; other I/O errors propagate.
        if exc.errno != errno.ENOENT:
            raise
        return None
|
9e8f8727eb453169045c7259f690fa3b0504266d
| 458,208
|
def clean_game_date(season_year: int, date: str) -> str:
    """Build an ISO date string from a season year and a compact date code.

    Args:
        season_year (int): The season year.
        date (str): The compact date string (e.g. "915", "1031", "105").

    Returns:
        str: "YYYY-MM-DD" when the code falls in the regular season,
        otherwise 'non-regular-season'. January/February games roll over
        into the following calendar year.
    """
    n = len(date)
    if n == 3 and date.startswith("9"):
        return f"{season_year}-09-{date[1:]}"
    if n == 4 and int(date[:2]) > 9:
        return f"{season_year}-{date[:2]}-{date[2:]}"
    if n == 3 and date[0] in ("1", "2"):
        return f"{season_year + 1}-0{date[0]}-{date[1:]}"
    return "non-regular-season"
|
361f4516332f3f4eb9dc8fcfcd831f7acd4da709
| 455,213
|
def getusername(cursor, displayname):
    """Look up the internal username for a display name; None if absent."""
    cursor.execute(
        "select username from profile where displayname = ?;",
        (displayname,),
    )
    row = cursor.fetchone()
    return None if row is None else row[0]
|
48caeb3de8101788280a9eca069e6be5dc51e6ee
| 523,503
|
def rgb_tuple_to_hex_str(rgb, a=None):
    """
    Return a hex string representation of the supplied RGB tuple
    (as used in SVG etc.) with optional alpha, rescaling all values from
    [0,1] into [0,255] (ie hex x00 to xff).

    Parameters:
    rgb - (r,g,b) tuple (each of r,g,b is in [0,1])
    a   - (default None) alpha channel (transparency) value or None

    Return value:
    string 'rrggbb' where rr, gg, bb are hex representations of r,g,b resp.
    or 'rrggbbaa' if a is not None, where aa is the hex repr. of alpha a.
    """
    r, g, b = (int(round(channel * 255)) for channel in rgb)
    # Use `is None` (identity), not `== None`: correct idiom and robust to
    # objects with odd __eq__ behavior.
    if a is None:
        return '%02x%02x%02x' % (r, g, b)
    return '%02x%02x%02x%02x' % (r, g, b, int(round(a * 255)))
|
fb009639fab6f1e6779397a91b5875820632e713
| 497,288
|
def freq_id_to_stream_id(f_id):
    """Convert a frequency ID into its packed 16-bit stream ID."""
    # Four 4-bit nibbles, packed at bit offsets 0, 4, 8 and 12.
    nibbles = (0, f_id % 16, f_id // 16, f_id // 256)
    stream_id = 0
    for position, nibble in enumerate(nibbles):
        stream_id += (nibble & 0xF) << (4 * position)
    return stream_id
|
f89d52adf4390f665e069c2b5f4f5accc22709b8
| 702,189
|
def best_evaluation(history):
    """Return the best-ever validation loss and accuracy seen in training.

    The two values are not necessarily from the same epoch.
    """
    metrics = history.history
    return min(metrics['val_loss']), max(metrics['val_acc'])
|
49a5087819acd7d69760d4cf3c60d976dc360262
| 198,571
|
def one_or_none(values):
    """Return the sole element of *values*, or None if it is empty.

    Raises ValueError if *values* holds more than one element.
    """
    if len(values) > 1:
        raise ValueError('Got more than one value.')
    return values[0] if values else None
|
e3d1517d6dd32a8f1f04a0eee56d49d844028ca2
| 284,243
|
def make_batch_indexes(size, data_len):
    """
    Create [start, stop] index pairs that partition ``data_len`` rows into
    batches of ``size`` (the final batch may be smaller).

    Parameters
    ----------
    size: int
        The size of the batches.
    data_len: int
        The number of rows in the data.

    Returns
    -------
    list:
        A list of [start, stop] pairs covering the whole data set.
    """
    index_couples = []
    start = 0
    while start + size <= data_len:
        index_couples.append([start, start + size])
        start += size
    # Only add a trailing partial batch when rows actually remain; the
    # previous implementation unconditionally appended an empty
    # [data_len, data_len] batch whenever data_len was a multiple of size.
    if start < data_len:
        index_couples.append([start, data_len])
    return index_couples
|
20c0ad14f8460f155342b0d443062c201dcbc891
| 210,678
|
def getBoundingBox(veclist):
    """Calculate bounding box (pair of vectors with minimum and maximum
    coordinates).

    >>> getBoundingBox([(0,0,0), (1,1,2), (0.5,0.5,0.5)])
    ((0, 0, 0), (1, 1, 2))"""
    if not veclist:
        # Assume 3 dimensions when the list is empty.
        return (0, 0, 0), (0, 0, 0)
    dim = len(veclist[0])
    mins = tuple(min(vec[axis] for vec in veclist) for axis in range(dim))
    maxs = tuple(max(vec[axis] for vec in veclist) for axis in range(dim))
    return mins, maxs
|
a2c035f85071e5a9f8dfee2c98cc46e86439a0cc
| 22,033
|
def is_child_class(target, base):
    """Report whether target is a strict subclass of base (not base itself)."""
    is_subclass = issubclass(target, base)
    return is_subclass and target is not base
|
731c551149f94401a358b510aa124ee0cba6d0bd
| 41,567
|
def pretty_print_dict(dtmp):
    """Pretty-print a flat (un-nested) dictionary.

    Parameters
    ----------
    dtmp : dict

    Returns
    -------
    str
        pretty-printed dictionary
    """
    # Column width: widest key plus room for its surrounding quotes.
    width = 2 + max(len(key) for key in dtmp.keys())
    lines = []
    for key, val in sorted(dtmp.items(), key=lambda item: item[0]):
        if type(val) == type(""):
            val = "'%s'" % val
        quoted_key = "'%s'" % key
        lines.append((" {0:<%s} : {1}," % width).format(quoted_key, val))
    return "{\n%s\n}\n" % "\n".join(lines)
|
1033777a6a79e4a86bc0b2f4edbaa3a3124e161f
| 248,998
|
import hashlib
def string_hash(obj):
    """Return the SHA-1 hex digest of str(obj). Not for security purposes."""
    digest = hashlib.sha1(str(obj).encode('utf-8'))
    return digest.hexdigest()
|
957958b1e2623c28cdb4b12ee0163fab43199ded
| 123,797
|
def depth(root):
    """Return the number of nodes on the longest root-to-leaf path
    (tree height + 1); nodes expose their children via ``.l`` and ``.r``."""
    return 0 if root is None else 1 + max(depth(root.l), depth(root.r))
|
1c9fcd578189a7b4200644fb1bfb80d818bf5a46
| 325,595
|
def _get_end(posn, alt, info):
"""Get record end position."""
if "END" in info:
# Structural variant
return info['END']
return posn + len(alt)
|
f26e6bd9049fb32d5a5fcf305014b81c16bd799d
| 221,527
|
def floatToStr(number: float, showNumOfDigits: int = 2) -> str:
    """Round *number* and render it with ``showNumOfDigits`` decimal places.

    Args:
        number: float number to convert to string.
        showNumOfDigits: number of decimal places kept after ``.``; when
            zero, the result has no decimal point at all.
    """
    rounded = round(number, showNumOfDigits)
    if showNumOfDigits == 0:
        return str(int(rounded))
    return "{:.{}f}".format(rounded, showNumOfDigits)
|
71099609ea05211d8f3a35401685f355e8ddaa21
| 554,494
|
def get_potential_compressed_names(path):
    """
    List @path plus every variant with a supported compression suffix
    (the uncompressed path itself comes first).
    """
    suffixes = ('', '.gz', '.bz2', '.lzma', '.xz')
    return [path + suffix for suffix in suffixes]
|
2f5f3cf26f9324805de8db7858bfe31e2f5d466e
| 200,196
|
import re
def clean_name(name: str) -> str:
    """
    Sanitize *name* into a valid Python identifier.
    """
    # Prefix names that don't start with a letter or underscore.
    if re.match("[a-zA-Z_]", name[0]) is None:
        name = "_" + name
    # Collapse every run of illegal characters into a single underscore.
    name = re.sub("[^0-9a-zA-Z_]+", "_", name)
    # A name made only of underscores is replaced by a placeholder.
    if set(name) == {"_"}:
        name = "v"
    return name
|
e37bc249104c03b25da6e39e59d7084c6ae9b95d
| 439,231
|
def spherical_index_k(degree: int, order: int = 0) -> int:
    """Map a spherical-harmonic (degree, order) pair to one mode index `k`.

    Args:
        degree (int): Degree of the spherical harmonics
        order (int): Order of the spherical harmonics

    Raises:
        ValueError: if `order` lies outside [-degree, degree]

    Returns:
        int: a combined index k
    """
    if order < -degree or order > degree:
        raise ValueError("order must lie between -degree and degree")
    return degree * (degree + 1) + order
|
592b0dd5550fe28ce79568b74ab4a071b57bf99b
| 220,147
|
def produce_tel_list(tel_config):
    """Render the telescope list as a comma-separated string (e.g. "T1,T2")
    for a FITS header."""
    return ",".join("T" + str(tel) for tel in tel_config["TelType"])
|
46a69c9c2b1f006376dc1fe730023c6d8a7db859
| 156,172
|
def GetListOfFeatureNames(feature_names):
  """Extract the list of feature names
  from string of comma separated values.

  Args:
    feature_names: string containing comma separated list of feature names

  Returns:
    List of the feature names
    Elements in the list are strings.
  """
  # Use a distinct loop variable: the original shadowed the parameter
  # `feature_names` inside the comprehension.
  list_of_feature_names = [name.strip() for name in feature_names.split(',')]
  return list_of_feature_names
|
1e922c49afd978cb31d0883cf361f298b2c0aa25
| 470,726
|
def segments_from_time_to_frame_idx(segments, hop_length_seconds):
    """
    Convert (start, end) segments from seconds to frame indexes, dropping
    any segment that collapses to zero frames after rounding.

    Parameters
    ----------
    segments : list of tuple
        The list of segments, as tuple (start, end), to convert.
    hop_length_seconds : float
        hop_length (time between two consecutive frames), in seconds.

    Returns
    -------
    list of [int, int]
        The segments expressed in frame indexes.
    """
    converted = []
    for start, end in segments:
        frame_pair = [
            int(round(start / hop_length_seconds)),
            int(round(end / hop_length_seconds)),
        ]
        if frame_pair[0] != frame_pair[1]:
            converted.append(frame_pair)
    return converted
|
029b0dedc5fbedbf590e89f976f23b555c5a60bf
| 416,668
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.