content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def seconds_to_hhmmss(seconds):
    """Convert seconds after midnight into an 'HH:MM:SS' string.

    Args:
        seconds (float): number of seconds after midnight, or None.
    Returns:
        str: the zero-padded time string, or None when seconds is None.
    """
    if seconds is None:
        return None
    total = int(seconds)
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{secs:02d}"
def upper_bound(arr, value):
    """Python version of std::upper_bound.

    Returns the index of the first element strictly greater than
    ``value``. Like its C++ namesake this assumes ``arr`` is sorted in
    ascending order, which lets us use an O(log n) binary search
    instead of the original O(n) linear scan. Returns ``len(arr)``
    when no element is greater.
    """
    import bisect
    return bisect.bisect_right(arr, value)
def PolyCoefficients(xt, coeffs):
    """Evaluate the polynomial with coefficients ``coeffs`` at ``xt``.

    The coefficients must be in ascending order (``x**0`` first,
    ``x**(len(coeffs)-1)`` last).
    """
    return sum(coeff * xt ** power for power, coeff in enumerate(coeffs))
def serialize(message):
    """Convert a message to a binary string."""
    # Delegates to the message's own SerializeToString() — presumably a
    # protobuf message; TODO confirm against callers.
    return message.SerializeToString() | 1f362d1478bbf32a39ef93366be73bfd7c7f83d2 | 117,424 |
def get_qlabel(qckeyword, calcindex):
    """
    Build a label for calculation number ``calcindex`` of a
    comma-separated ``qckeyword``, based on that calculation's
    dependencies.

    Composite calculations ('compos...') keep every preceding entry;
    any other calculation drops preceding 'energy'/'compos' entries.
    """
    calcs = qckeyword.split(',')
    selected = calcs[calcindex]
    if selected.startswith('compos'):
        return ','.join(calcs[:calcindex + 1])
    deps = [c for c in calcs[:calcindex]
            if not (c.startswith('energy') or c.startswith('compos'))]
    deps.append(selected)
    return ','.join(deps)
def uniques_from_list(df_col):
    """
    Count how many times each item appears across a column of lists.

    :param df_col: pandas Series whose entries are lists (or None)
    :return count_dict: dict mapping str(item) -> number of occurrences

    Iterates over the Series values directly instead of positional
    ``df_col[index]`` lookups, which raised KeyError whenever the
    Series carried a non-default (non-Range) index.
    """
    count_dict = {}
    for obs in df_col:
        if obs is None:
            continue
        for item in obs:
            key = str(item)
            count_dict[key] = count_dict.get(key, 0) + 1
    return count_dict
def possibly_flip(is_white, neighbors):
    """
    Apply the tile-flipping rules and return the new colour
    (True == white).

    A black tile with zero or more than two black neighbours flips to
    white; a white tile with exactly two black neighbours flips to
    black; any other tile keeps its colour.
    """
    black_count = sum(1 for tile_is_white in neighbors if not tile_is_white)
    if is_white:
        if black_count == 2:
            return False
    elif black_count == 0 or black_count > 2:
        return True
    return is_white
def __clean_key(key):
    """
    Make a key safe for the file store by replacing every ':' with '__'.

    :param key: key of the data (converted with str() first)
    :return: sanitized key string
    """
    return '__'.join(str(key).split(':'))
from bs4 import BeautifulSoup
def clean_html(text):
    """
    Clean text from html tags
    :param str text:
    :return str text:
    """
    try:
        text = BeautifulSoup(text, "html").text
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt; the message assumes the only failure mode
        # is a None argument — confirm that is intended.
        print("Exception in clean_html. NoneType argument.")
        return ""
    return text | cd154cbb8cf622b80e2f7f2afc922e11e25bbc18 | 117,430 |
def normalizeS2(image):
    """Utility method to normalize pixels values in a S2 image

    Divides all bands by 10000, keeps only bands whose names match
    "B.*", and copies the acquisition time property onto the result.
    NOTE(review): ``image`` is presumably an Earth Engine ee.Image —
    confirm; the type is not visible from this file.
    """
    return image.divide(10000).select("B.*").copyProperties(image, ["system:time_start"]) | 116610361866db59faabcca0f1a5fc9d48add6ea | 117,434 |
import json
def json_decode(value):
    """Deserialize the JSON string ``value`` into Python objects."""
    decoded = json.loads(value)
    return decoded
def trapezoid_left(x1: float, x2: float, x3: float, y1: float, y3: float) -> float:
    """
    Area of the trapezoid with corners (x2, 0), (x2, y2), (x3, 0),
    (x3, y3), where y2 is the linear interpolation of (x1, y1) and
    (x3, y3) evaluated at x2.

    Args:
        x1, x2, x3 (float): x coordinates
        y1, y3 (float): y coordinates
    Returns:
        float: the trapezoid area; degenerate inputs (x2 == x3 or
        x2 < x1) fall back to the rectangle (x3 - x2) * y1.
    """
    if x2 == x3 or x2 < x1:
        # Degenerate cases
        return (x3 - x2) * y1
    # Interpolate y2 at x2, then apply the trapezoid rule.
    weight = (x3 - x2) / (x3 - x1)
    y2 = y1 * weight + y3 * (1 - weight)
    return 0.5 * (x3 - x2) * (y2 + y3)
def index_to_numbers(idx):
    """Convert a dotted index string 'line.col' to the tuple (line, col)."""
    return tuple(int(part) for part in idx.split('.'))
def schedule(epoch, lr):
    """
    Learning-rate schedule: quarter the rate at epoch 15, otherwise
    return it unchanged.

    Parameters:
        epoch: current epoch number
        lr: current learning rate
    Returns:
        lr: updated learning rate
    """
    return lr / 4 if epoch == 15 else lr
from datetime import datetime
def determine_time_slot(time):
    """
    Determine the 10-minute time slot of the day for a datetime string.

    :type time: str string containing datetime "yyyy-mm-dd hh:mm:ss"
    :rtype : int id of the 10-minute slot, 0 <= id < 144

    Uses floor division so the promised int is actually returned
    (the original true division yielded a float under Python 3).
    """
    dt = datetime.strptime(time, "%Y-%m-%d %H:%M:%S")
    return (dt.hour * 60 + dt.minute) // 10
def lin_utility(x: float) -> float:
    """Linear (identity) utility: the utility of a value x is x itself."""
    return x
def grid_traveler(m, n, memo=None):
    """
    Number of ways to travel an m x n grid moving only down or right.

    @input m - number of rows
    @input n - number of columns
    @ref https://youtu.be/oBt53YbR9Kk?t=2503
    @details Moving down from an m x n grid leaves an (m-1) x n grid;
    moving right leaves an m x (n-1) grid. O(m * n) time, O(m * n)
    memo space plus O(max(m, n)) stack frames.

    Fixes over the original: the mutable default ``memo={}`` (shared
    across every call) is replaced by a None sentinel, and the memo is
    now actually threaded through the recursive calls.
    """
    if memo is None:
        memo = {}
    # Not a grid with any tiles at all.
    if m < 1 or n < 1:
        return 0
    if m == 1 or n == 1:
        return 1
    if (m, n) in memo:
        return memo[(m, n)]
    # Ways after a down move plus ways after a right move.
    memo[(m, n)] = grid_traveler(m - 1, n, memo) + grid_traveler(m, n - 1, memo)
    return memo[(m, n)]
def scale_drop(coords, magnificationRatio):
    """
    Scale pixel coordinates to metres.

    coords = ndarray (N, 2) of pixel coordinates
    magnificationRatio = float, pixel-to-millimetre conversion; the
    extra 1e-3 factor turns the millimetre result into metres.
    """
    factor = magnificationRatio * 1e-3
    return coords * [factor, factor]
def brittle_coulumb_fos(s1, s3, Sut, Suc):
    """
    Computes the brittle coulumb factor of safety
    :param s1: the first principle stress in kpsi
    :param s3: the second principle stess in kpsi
    :param Sut: Ultimate tensile
    :param Suc: Ultimate compression
    :return:
    """
    if s1 >= 0 and s3 >= 0:
        # Both tensile: governed by the ultimate tensile strength.
        n = Sut/s1
        return n
    elif s1 >= 0 and s3 < 0:
        # Mixed tension/compression: combined criterion.
        n_inv = (s1/Sut) - (s3/Suc)
        n = 1/n_inv
        return n
    elif s1 < 0 and s3 < 0:
        # Both compressive: governed by the ultimate compressive strength.
        n = -Suc/s3
        return n
    # NOTE(review): s1 < 0 <= s3 falls through and returns None —
    # presumably unreachable because principal stresses are ordered
    # s1 >= s3, but confirm with callers.
 | 2057510db8ca7ce3859772d67a29e8ea72b26439 | 117,467 |
import importlib
def initialize_config(module_cfg, pass_args=True):
    """
    Dynamically load ``module_cfg["main"]`` from ``module_cfg["module"]``.

    Example config:
        module_cfg = {"module": "model.model", "main": "Model", "args": {...}}

    When ``pass_args`` is true the attribute is called (or the class
    instantiated) with ``module_cfg["args"]`` as keyword arguments;
    otherwise the bare attribute is returned.
    """
    loaded = importlib.import_module(module_cfg["module"])
    target = getattr(loaded, module_cfg["main"])
    if not pass_args:
        return target
    return target(**module_cfg["args"])
def get_chart_actual(prefix='', rconn=None):
    """Return True if the chart is showing actual view, False if target view"""
    if rconn is None:
        return False
    try:
        # Redis returns bytes; any failure (missing key, connection
        # error, decode error) is treated as "target view".
        actual = rconn.get(prefix+'chart_actual').decode('utf-8')
    except:
        return False
    # NOTE(review): bool(actual) is True for ANY non-empty string,
    # including "False"/"0" — confirm the stored value is empty or
    # absent when the target view is meant.
    return bool(actual) | c338a38e9da646a306e0dbaf83d3ece789cedc2d | 117,471 |
def ppm_to_freq(ppm, water_hz=0.0, water_ppm=4.7, hz_per_ppm=127.680):
    """
    Convert chemical shift (ppm) to frequency (Hz), referenced to the
    water resonance.
    """
    shift_from_water = ppm - water_ppm
    return water_hz + shift_from_water * hz_per_ppm
def islinear(doc):
    """
    Given sbol document (shallow or full) find linearness
    Requires
    --------
    None
    Parameters
    ----------
    doc: sbol document.Document object
        a shallow or full doc created using sbol2
    Returns
    -------
    linear: boolean
        if the file is linear True, if the file is circular (a plasmid) False
    Example
    -------
    file_in = "https://synbiohub.programmingbiology.org/public/UWYeast_AND_gate/plasmid_0/1/sbol"
    doc = sbol2.Document()
    doc.read(file_in)
    circular = islinear(doc)
    """
    linear = True
    for top_level in doc:
        if top_level.type == "http://sbols.org/v2#ComponentDefinition":
            #create a set of types for every top_level component definition
            type_set = set(top_level.types)
            # NOTE(review): SO:0000988 is presumably the Sequence
            # Ontology "circular" topology term — one circular
            # ComponentDefinition marks the whole document circular.
            if "http://identifiers.org/so/SO:0000988" in type_set:
                linear = False
    return(linear) | c08454b5d7ee940e3dc9b8619e1ff27c2c9b1bfa | 117,483 |
def get_file_name_from_url(url: str) -> str:
    """
    Return the last path segment of ``url`` with any query string
    (everything from the first '?') stripped.
    """
    tail = url.rsplit('/', 1)[-1]
    name, _, _query = tail.partition('?')
    return name
def _get_all_issue_tracker_keys(all_issue_trackers):
    """Return the union of the keys of every dict in ``all_issue_trackers``."""
    keys = set()
    for tracker in all_issue_trackers:
        keys |= set(tracker)
    return keys
def _get_score_from_submissions(submissions_scores, block):
    """
    Returns the score values from the submissions API if found.

    Returns a 5-tuple (None, None, weighted_earned, weighted_possible,
    first_attempted) when a score exists for the block's location, and
    implicitly None otherwise. NOTE(review): the two leading Nones are
    presumably raw earned/possible placeholders — confirm with callers.
    """
    if submissions_scores:
        # Keys are the string form of the block's usage location.
        submission_value = submissions_scores.get(str(block.location))
        if submission_value:
            first_attempted = submission_value['created_at']
            weighted_earned = submission_value['points_earned']
            weighted_possible = submission_value['points_possible']
            assert weighted_earned >= 0.0 and weighted_possible > 0.0  # per contract from submissions API
            return (None, None) + (weighted_earned, weighted_possible) + (first_attempted,) | 603aee26958ba9dab8e6475d616b8b72af7501bb | 117,495 |
def _GetGrad(grads, t):
    """Gets gradient for tensor "t".

    Looks up the gradients recorded for t's producing op and returns
    the entry matching t's output index, or None when the op has no
    recorded gradients.
    """
    op = t.op
    op_grads = grads.get(op)
    if not op_grads:
        return None
    # value_index selects which output of the op this tensor is.
    t_grad = op_grads[t.value_index]
    assert not isinstance(t_grad, list), (
        "gradients list should have been aggregated by now.")
    return t_grad | 165d2e191e91b245d7f0e4162f4f87d4b7fd6129 | 117,502 |
def listmofize(x):
    """
    Wrap ``x`` in a list unless it is already a list or tuple, in
    which case return it unchanged.
    >>> listmofize('abc')
    ['abc']
    >>> listmofize(['abc', 'def'])
    ['abc', 'def']
    >>> listmofize((1,2))
    (1, 2)
    """
    if isinstance(x, (list, tuple)):
        return x
    return [x]
def field_has_keyword(field, keywords):
    """For internal use. Return True when ``field`` has at least one
    of the given keywords."""
    return any(field.has_keyword(kw) for kw in keywords)
def mirroring_test(matches):
    """
    Compute and return a mask for the matches dataframe on each edge of the graph which
    will keep only entries in which there is both a source -> destination match and a destination ->
    source match.
    Parameters
    ----------
    matches : dataframe
              the matches dataframe stored along the edge of the graph
              containing matched points with columns containing:
              matched image name, query index, train index, and
              descriptor distance
    Returns
    -------
    duplicates : dataseries
                 Intended to mask the matches dataframe. Rows are True if the associated keypoint passes
                 the mirroring test and false otherwise. That is, if 1->2, 2->1, both rows will be True,
                 otherwise, they will be false. Keypoints with only one match will be False. Removes
                 duplicate rows.
    """
    # NOTE(review): DataFrame.duplicated(keep='last') marks every
    # duplicate EXCEPT the last occurrence, so of a mirrored pair only
    # one row is True — this differs from the docstring's claim that
    # both rows are True. Confirm which behavior callers rely on.
    duplicate_mask = matches.duplicated(subset=['source_idx', 'destination_idx', 'distance'], keep='last')
    return duplicate_mask | db3b99c9aec4e2db7545929c5e4e8a1a8dc4c748 | 117,512 |
def get_neighbours(i, j, grid):
    """Count the neighbours of cell (i, j) on a wrapping (toroidal) grid.

    Args:
        i (int): row number of the cell.
        j (int): column number of the cell.
        grid: square 2-D array of cell states.
    Returns:
        int: sum of the eight (wrapping) neighbour values.
    """
    size = len(grid)
    total = 0
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue
            # Modulo wraps both edges (Python's negative indexing
            # already wrapped the low edge in the original).
            total += grid[(i + di) % size][(j + dj) % size]
    return total
def parse_entity(entity, filter_none=False):
    """
    Collect an object's public, non-callable attributes into a dict.

    Args:
        entity (object): object to extract attributes from.
        filter_none (bool): when True, drop attributes with falsy values.
    Returns:
        result (dict): attribute name -> value.
    """
    result = {}
    for attr_name in dir(entity):
        if attr_name.startswith("_"):
            continue
        value = getattr(entity, attr_name, None)
        if filter_none and not value:
            continue
        # Skip methods and other callables.
        if "__call__" in dir(value):
            continue
        result[attr_name] = value
    return result
def get_mph(velocity):
    """
    Convert a speed from metres per second to miles per hour [mph].

    The original divided by 1852 — the length in metres of a NAUTICAL
    mile — which yields knots, not mph. A statute mile is exactly
    1609.344 m, so mph = m/s * 3600 / 1609.344.
    """
    return velocity * 3600 / 1609.344
def ifilterfalse_items(item_iter, flag_iter):
    """
    Lazily yield the items whose paired flag is False.

    Args:
        item_iter (list):
        flag_iter (list): of bools

    Example:
        >>> list(ifilterfalse_items([1, 2, 3, 4, 5],
        ...                         [False, True, True, False, True]))
        [1, 4]
    """
    for item, flag in zip(item_iter, flag_iter):
        if not flag:
            yield item
def get_operator_type(config):
    """Return the 'op' field of an operator configuration.

    Raises:
        ValueError: when the 'op' field is missing (or None).
    """
    operator = config.get("op")
    if operator is not None:
        return operator
    raise ValueError(
        "Missing 'op' field in operator configuration: %s" % str(config)
    )
def list_to_filename(filelist):
    """Return ``filelist`` itself when it holds more than one entry,
    otherwise its single first element."""
    return filelist if len(filelist) > 1 else filelist[0]
def targetFeatureSplit(data):
    """
    Split featureFormat-style rows into a target list (first column —
    the quantity to predict) and a feature list (remaining columns).

    Returns targets and features as separate lists; sklearn accepts
    both lists and numpy arrays for training/predicting.
    """
    target = [row[0] for row in data]
    features = [row[1:] for row in data]
    return target, features
def sample_to_plane(sample_orientation: str):
    """Map a sample orientation code to its Cartesian circle plane type."""
    lookup = {
        'S1': 'YZ', 'S2': 'XZ', 'S3': 'XY',
        'S1R': 'ZY', 'S2R': 'ZX', 'S3R': 'YX',
        'S4': 'YX to YZ', 'S5': 'XY to XZ', 'S6': 'XZ to YZ',
    }
    return lookup[sample_orientation]
import string
def generate_query_string(filepath: str, project_id: str) -> str:
    """Read a query file, drop '#' comment lines (license header etc.)
    and substitute the GCP project id.

    Args:
        filepath: The path to the file containing the input query.
        project_id: GCP project id.
    Returns:
        The query with the GCP project ID substituted.
    Raises:
        IOError: when the file at ``filepath`` does not exist.
    """
    try:
        with open(filepath) as query_file:
            kept_lines = [line for line in query_file
                          if not line.startswith('#')]
    except IOError as io_error:
        raise IOError(
            'Query file does not exist: {}'.format(filepath)) from io_error
    template = string.Template(''.join(kept_lines))
    return template.substitute(project_id=project_id)
def combine_dicts(seq):
    """Merge a sequence of dictionaries into one; later entries win on
    key clashes."""
    merged = {}
    for mapping in seq:
        merged = {**merged, **mapping}
    return merged
def capital_pi1(x, w):
    """
    PI_1 value for fqe_I, related to the self energy of the dynamic
    susceptibility.

    Parameters
    ----------
    x : float
        q (momentum transfer) / kappa (inverse mag. correlation length)
    w : float

    Returns
    -------
    PI_1 : complex
    """
    a_coeff = 0.46
    b_coeff = 3.16
    inner = (1 + b_coeff / x ** 2) ** 1.25 - 1j * a_coeff * w
    return inner ** 0.6
def is_hydrogen(atom):
    """Return True when this atom's element symbol is hydrogen ('H')."""
    symbol = atom.element.symbol
    return symbol == "H"
import torch
def unpack(x: torch.Tensor):
    """Split the trailing dimension into its (real, imaginary) parts."""
    real = x[..., 0]
    imag = x[..., 1]
    return real, imag
def trim_earlier(array, t):
    """
    Return the portion of an array falling after a reference value.

    Parameters
    ----------
    array: list or numpy.ndarray
        The array to trim. Must be monotonically increasing.
    t: number
        The reference value used for trimming the array.

    Returns the suffix of ``array`` starting at the first value greater
    than or equal to ``t``. For example:
        trim_earlier([1, 3, 5], 2) == [3, 5]
        trim_earlier([4, 6, 8], 6) == [6, 8]
        trim_earlier([10, 20, 30], 40) == []
    """
    for pos, value in enumerate(array):
        if value >= t:
            return array[pos:]
    # Nothing reaches t: return an empty slice of the same type.
    return array[len(array):]
import unicodedata
def kansuji_to_num(x: str) -> int:
    """Convert a sequence of *kansuji* to the corresponding number.
    *Kansuji* is a representation of a number, which is used in Chinese and Japanese. This function converts a sequence of *kansuji* (a *kanji* string) to the corresponding number.
    Parameters
    ----------
    x : str
        A sequence of *kansuji*.
    Returns
    -------
    int
        The corresponding number for a given *kansuji* string.
    Examples
    --------
    >>> kawasemi.util.kansuji_to_num('三十五')
    35
    >>> kawasemi.util.kansuji_to_num('百六')
    106
    """
    result = 0
    if x == '' or x is None:
        return result
    # unicodedata.numeric maps each kanji numeral to its value
    # (e.g. 三 -> 3, 十 -> 10, 百 -> 100). The x[:] copy is redundant.
    nums = [int(unicodedata.numeric(l)) for l in x[:]]
    nums2 = [nums.pop(0)]
    for n in nums:
        if n < 10:
            # Digit (1-9): starts a new additive term.
            nums2.append(n)
        else:
            # Multiplier unit (10/100/1000/...): scales the last term.
            nums2[-1] *= n
    # The number is the sum of the folded terms, e.g. 三十五 -> 30 + 5.
    for n in nums2:
        result += n
    return result | 21cf0adf6920020d00596dcc99e4865a75bfcedb | 117,556 |
def calc_check_digits(number):
    """Calculate the two check digits for ``number``.

    The number passed should not already include the check digits;
    only the first nine characters are used.
    """
    digits = [int(ch) for ch in number[:9]]
    # Even positions weigh 3, odd positions weigh 1.
    weighted_total = sum(d * (3 if pos % 2 == 0 else 1)
                         for pos, d in enumerate(digits))
    check1 = (10 - weighted_total) % 10
    check2 = (check1 + sum(digits)) % 10
    return '%d%d' % (check1, check2)
def ispalindrome(n):
    """
    Return True when the decimal representation of ``n`` reads the
    same forwards and backwards.
    """
    text = str(n)
    return all(text[k] == text[-1 - k] for k in range(len(text) // 2))
def get_runner_image_url(experiment, benchmark, fuzzer, docker_registry):
    """Get the URL of the docker runner image for fuzzing the benchmark
    with fuzzer."""
    return f'{docker_registry}/runners/{fuzzer}/{benchmark}:{experiment}'
import re
def valid_pid(entry):
    """Validate pid entries: exactly nine characters, all digits.

    The original used ``re.match(r'\\D', entry)``, which anchors at the
    start and therefore only checked the FIRST character — strings such
    as '12345678a' were wrongly accepted. ``str.isdigit`` checks every
    character.
    """
    return len(entry) == 9 and entry.isdigit()
def next_field_pad(pos_prev, offset, width, display):
    """
    Compute row placement and padding for the next field on a
    12-column layout.

    pos_prev   Position following previous field
    offset     Offset of next field
    width      Width of next field
    display    True if next field is to be displayed, otherwise False.

    Returns a tuple::
        [0] True if the next field must start a new row
        [1] Amount of padding (0..11) from end of last field or row start
        [2] Position of the next free space after the new element
    """
    if not display:
        # Hidden fields occupy no space and keep the cursor in place.
        return (False, 0, pos_prev)
    wraps = offset < pos_prev or offset + width > 12
    padding = offset if wraps else offset - pos_prev
    return (wraps, padding, offset + width)
from typing import List
from typing import Dict
def confusion_matrix_binary(
    y_true: List[int],
    y_pred: List[int]
) -> Dict[str, int]:
    """
    Count the binary-classification confusion-matrix cells.

    Parameters
    ----------
    y_true : list of ints
        True labels
    y_pred : list of ints
        Predicted labels

    Returns
    -------
    Dict[str, int]
        Number of samples in each of tp, tn, fp, fn.

    Examples
    --------
    >>> confusion_matrix_binary([1, 1, 0, 0], [1, 0, 0, 1])
    {'tp': 1, 'tn': 1, 'fp': 1, 'fn': 1}
    """
    counts = {'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}
    for truth, guess in zip(y_true, y_pred):
        if truth:
            cell = 'tp' if guess else 'fn'
        else:
            cell = 'fp' if guess else 'tn'
        counts[cell] += 1
    return counts
def rekey(batch, key_map):
    """Rekey a batch using ``key_map``.

    key_map specifies new_key: old_key pairs — e.g. pass
    {'inputs': 'targets', 'targets': 'targets'} to expose the old
    'targets' feature under both new names.

    Args:
        batch: a dictionary to read from.
        key_map: dict mapping new keys to old keys.
    Returns:
        a new batch dict.
    """
    remapped = {}
    for new_key, old_key in key_map.items():
        remapped[new_key] = batch[old_key]
    return remapped
def generate_unique_name(pattern, nameset):
    """Create a unique numbered name from a pattern and a set.

    Parameters
    ----------
    pattern: basestring
        The pattern for the name (used with %) containing one %d slot.
    nameset: collection
        Existing names. If the generated name is used, add it to the
        nameset afterwards.

    Returns
    -------
    str
        The first ``pattern % i`` (i = 0, 1, 2, ...) not in nameset.
    """
    index = 0
    candidate = pattern % index
    while candidate in nameset:
        index += 1
        candidate = pattern % index
    return candidate
def _to_bool(string_value):
    """Convert "true"/"false" (case-insensitive) to the matching bool.

    Args:
        string_value: A string.
    Returns:
        True for "true", False for "false".
    Raises:
        ValueError: for any other string.
    """
    lowered = string_value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise ValueError(
        'invalid literal for boolean: %s (must be "true" or "false")' %
        string_value)
def tests_use_db(request):
    """Check if any of the tests in this run require db setup"""
    # NOTE(review): Item.get_marker() was removed in pytest 4; newer
    # pytest requires item.get_closest_marker("django_db"). Confirm the
    # pinned pytest version before touching this.
    return any(item for item in request.node.items if item.get_marker("django_db")) | 84035ea74f586d3007eedf56f921eea6a1124fad | 117,594 |
def _nova_xlate_modnames(mod_names, name, fpath, suffix, mod, mode='mod_names'):
    """
    Translate (xlate) "service" into "/service"
    args:
        name   :- the name of the module we're loading (e.g., 'service')
        fpath  :- the file path of the module we're loading
        suffix :- the suffix of the module we're loading (e.g., '.pyc', usually)
        mod    :- the actual imported module (allowing mod.__file__ examination)
        mode   :- the name of the load_module variable being translated
    return:
        either a list of new names (for "mod_names") or a single new name
        (for "name" and "module_name")

    NOTE(review): mod_names, fpath, suffix and mod are unused here —
    presumably kept so every xlate hook shares one signature; confirm
    with the caller before simplifying.
    """
    new_modname = '/' + name
    if mode in ("module_name", "name"):
        return new_modname
    return [ new_modname ] | 3e1528cf9ba58d857e69ddf18ada536f5da132c1 | 117,598 |
import pytz
def localize_datetime_utc(date_time):
    """
    Localizes in the UTC timezone a given Datetime object.
    :param date_time: The object to be localized (must be naive —
        pytz.localize raises ValueError on an already-aware datetime).
    :return: Localized Datetime object in the UTC timezone.
    """
    return pytz.utc.localize(date_time) | 7cecd2d34a061fe5da237b698fb84d5e12180f06 | 117,600 |
def hex_to_rgb(h):
    """Converts a "#rrggbb" string to a (red, green, blue) tuple.

    Args:
        h: a hex string (leading '#' optional)
    Returns:
        an (R, G, B) tuple of ints in 0..255
    """
    digits = h.lstrip("#")
    return tuple(int(digits[start:start + 2], 16) for start in (0, 2, 4))
def _extended_euclidean(q, r):
    """Return a tuple (p, a, b) such that p = aq + br,
    where p is the greatest common divisor.
    """
    # see [Davenport], Appendix, p. 214
    if abs(q) < abs(r):
        # Ensure |q| >= |r|; swap the Bezout coefficients back on return.
        p, a, b = _extended_euclidean(r, q)
        return p, b, a
    # Q and R track the Bezout coefficient pairs for q and r.
    Q = 1, 0  # noqa: N806
    R = 0, 1  # noqa: N806
    while r:
        quot, t = divmod(q, r)
        T = Q[0] - quot*R[0], Q[1] - quot*R[1]  # noqa: N806
        q, r = r, t
        Q, R = R, T  # noqa: N806
    return q, Q[0], Q[1] | ccb7ce4ec3d94b86aea856263fc11aca456d1c09 | 117,603 |
import warnings
def calc_dimless_tes_th_flex(sh_dem, dhw_dem, en_av_tes):
    """
    Calculate the dimensionless thermal storage flexibility beta_th.

    Parameters
    ----------
    sh_dem : float
        Annual space heating demand in kWh
    dhw_dem : float
        Annual hot water demand in kWh
    en_av_tes : float
        Average stored usable amount of thermal energy within TES for
        a whole year in kWh

    Returns
    -------
    beta_th : float or None
        Dimensionless thermal storage flexibility; None (plus a
        warning) when both demands are zero.
    """
    assert sh_dem >= 0
    assert dhw_dem >= 0
    assert en_av_tes >= 0
    total_demand = sh_dem + dhw_dem
    if total_demand == 0:
        msg = 'Cannot calculate beta_th, as thermal demands are zero!' \
              ' Going to return None.'
        warnings.warn(msg)
        return None
    # beta_th relates the stored energy to the average daily demand.
    return en_av_tes / (total_demand / 365)
def get_int_list(input_str):
    """ Return the comma-separated ``input_str`` as a list of integers. """
    return [int(piece) for piece in input_str.split(",")]
def qmap(f, q):
    """
    Apply `f` post-order to all sub-terms in query term `q`.

    Nodes exposing a ``_fields`` attribute (namedtuple-style terms) are
    rebuilt from their mapped children before `f` is applied; lists and
    tuples are rebuilt element-wise; leaves are passed to `f` directly.
    """
    if hasattr(q, '_fields'):
        attrs = []
        for field in q._fields:
            attr = getattr(q, field)
            # Recurse into each child first (post-order).
            attrs.append(qmap(f, attr))
        cls = type(q)
        obj = cls(*attrs)
        return f(obj)
    elif isinstance(q, (list, tuple)):
        cls = type(q)
        # tuple(generator) / list(generator) both accept this form.
        return cls(qmap(f, x) for x in q)
    return f(q) | 993dc15cd875a786f89379968d793599ad5af8c9 | 117,609 |
from typing import Union
import torch
def wrap_device(d: Union[str, torch.device]) -> torch.device:
    """
    Coerce a device spec into a ``torch.device``.

    Strings are wrapped; ready-made torch.device objects are returned
    unmodified.
    """
    assert isinstance(d, (str, torch.device))
    return torch.device(d) if isinstance(d, str) else d
def menu_entradas(request):
    """
    Build the context configuration for the patrimônio 'entradas' menu.

    Returns a dict with the app/menu names, the menu buttons and the
    return button.
    """
    return {
        'app': 'Patrimônio',
        'menu': 'Entradas',
        'menu_buttons': [
            {'link': '/patrimonio/entradas/ferramenta', 'text': 'Entrada de ferramenta'},
            {'link': '/patrimonio/entradas/patrimonio_1', 'text': 'Entrada de patrimônio'},
        ],
        'button_return': {'link': '/patrimonio', 'text': 'Voltar'},
    }
def applied_to_degree(record, degree):
    """ (str, str) -> bool
    Return True iff the student represented by record has applied to the
    specified degree program.
    >>> applied_to_degree('Paul Gries,Ithaca High School,1986,BIO,60,70,CHM,80,90,CAT,95,96,BEng', 'BEng')
    True
    >>> applied_to_degree('Jacqueline Smith,Fort McMurray Composite High,2015,MAT,90,NE,ENG,92,88,CHM,80,85,BArts', 'BEng')
    False
    """
    return record.find(degree) != -1
def cpp_template_type(template, inner_type):
    """Return the C++ template specialized to ``inner_type``, inserting
    a space before the closing bracket when needed to avoid '>>'."""
    closer = ' >' if inner_type.endswith('>') else '>'
    return template + '<' + inner_type + closer
def get_items_except(seq, indices, seq_constructor=None):
    """Return all items in ``seq`` whose positions are not in ``indices``.

    The result has the same type as ``seq`` (strings are re-joined)
    unless ``seq_constructor`` overrides the output type.
    """
    skip = set(indices)
    kept = [item for pos, item in enumerate(seq) if pos not in skip]
    if seq_constructor:
        return seq_constructor(kept)
    if isinstance(seq, str):
        return ''.join(kept)
    return seq.__class__(kept)
def find_closest_road(data, from_):
    """
    Find closest road with Breadth First Search (BFS).

    Args:
        data: array for search (2-D, indexed as data[row, col])
        from_: [x, y] starting point for search

    Returns the BFS path (list of (row, col) tuples, starting point
    included) to the nearest cell whose value differs from the start
    cell's value. NOTE(review): implicitly returns None when no such
    cell exists — confirm callers handle that.
    """
    queue = [(from_, [tuple(from_)])]
    visited = set()
    visited.add(tuple(from_))
    while len(queue):
        # pop position & paths
        current, paths = queue.pop(0)
        if data[current[0], current[1]] != data[from_[0], from_[1]]: # if found road then return paths
            return paths
        # Expand all 8 neighbours (diagonals included).
        for (dix, diy) in [(-1, -1), (-1, 0), (0, -1), (1, 1), (1, 0), (0, 1), (1, -1), (-1, 1)]:
            to_ = (
                current[0] + dix,
                current[1] + diy
            )
            if to_ not in visited and 0 <= to_[0] < data.shape[0] and 0 <= to_[1] < data.shape[1]:
                # add to queue
                queue.append((to_, paths + [to_]))
                visited.add(to_) | 3c3d171c58942ca2e6eb8147c8ee0ed9503a8294 | 117,622 |
import struct
def parse_nv_block(decompressor, nv_bytes):
    """
    This function parses the compressed name-value header block.

    :param decompressor: A ``zlib`` decompression object from the stream.
    :param nv_bytes: The bytes comprising the name-value header block.

    Wire format (after decompression): a 32-bit big-endian pair count,
    then for each pair a 32-bit length-prefixed name and a 32-bit
    length-prefixed value. NOTE(review): 32-bit prefixes match SPDY v3
    (v2 used 16-bit) — confirm the protocol version in use.
    """
    headers = {}
    if not nv_bytes:
        return headers
    data = decompressor.decompress(nv_bytes)
    # Get the number of NV pairs.
    num = struct.unpack("!L", data[0:4])[0]
    data = data[4:]
    # Remaining data.
    for i in range(0, num):
        # Get the length of the name, in octets.
        name_len = struct.unpack("!L", data[0:4])[0]
        name = data[4:4+name_len]
        data = data[4+name_len:]
        # Now the length of the value.
        value_len = struct.unpack("!L", data[0:4])[0]
        value = data[4:4+value_len]
        data = data[4+value_len:]
        # You can get multiple values in a header, they're separated by
        # null bytes. Use a list to store the multiple values.
        vals = value.split(b'\0')
        if len(vals) == 1:
            vals = vals[0]
        headers[name] = vals
    return headers | 533f90099c6c4fcb988840835afab090455a4ec7 | 117,624 |
def message_link(guild_id, channel_id, message_id) -> str:
    """
    Build a Discord message link from the given ids.
    >>> message_link(1, 2, 3)
    'https://discordapp.com/channels/1/2/3'
    """
    base = 'https://discordapp.com/channels'
    return f'{base}/{guild_id}/{channel_id}/{message_id}'
def make_counter_summary(counter_element):
    """Turns a JaCoCo <counter> tag into an llvm-cov totals entry.

    Reads the element's "covered" and "missed" attributes and returns
    a dict with covered/notcovered/count/percent. Guards against a
    ZeroDivisionError when the counter reports zero lines total
    (covered == missed == 0), reporting 0.0 percent in that case.
    """
    covered = int(counter_element.attrib["covered"])
    missed = int(counter_element.attrib["missed"])
    count = covered + missed
    return {
        "covered": covered,
        "notcovered": missed,
        "count": count,
        "percent": (100.0 * covered) / count if count else 0.0,
    }
def clean(name):
    """
    Prepare name for query.
    Remove numeric characters which cause the query to break if at the start.
    """
    # Map characters that are unsafe in the query language to stand-ins.
    name = name.replace("-", "_").replace(".", "X")
    if name[0].isnumeric():
        # NOTE(review): this REPLACES the leading digit with 'X' rather
        # than prefixing ('1ab' and '2ab' both become 'Xab'). The
        # docstring says "remove", so this may be intended — confirm.
        name = f"X{name[1:]}"
    return name | e67b55df6e32aef11bb47b1d49b779d5644da5cf | 117,644 |
import hashlib
def get_md5sum_file(fullname, blksize=2**15):
    """Return the MD5 hex digest of the file at *fullname*.

    The file is read in chunks of *blksize* bytes so memory use stays
    bounded for large files.
    """
    digest = hashlib.md5()
    with open(fullname, 'rb') as handle:
        while True:
            chunk = handle.read(blksize)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
import requests
import json
def get_assoc_comments(subm_id):
    """Fetch the comments associated with submission *subm_id* from Pushshift.

    Returns the ``data`` list from the JSON response, or an empty list when
    the response body cannot be decoded as JSON.
    """
    url = "https://api.pushshift.io/reddit/search/comment/?link_id={}".format(subm_id)
    response = requests.get(url)
    try:
        return json.loads(response.text)["data"]
    except ValueError:
        # Body was not valid JSON (e.g. empty/None payload).
        return []
def white_dwarf_mass(M):
    """Initial-final mass relation: white-dwarf mass from progenitor mass *M*.

    Piecewise-linear IFMF from Salaris et al. 2009, with a break at
    4 solar masses.
    """
    if M < 4.0:
        slope, intercept = 0.134, 0.331
    else:
        slope, intercept = 0.047, 0.679
    return slope * M + intercept
def filter_source_files(source_files, target_dates):
    """Keep only the filenames that mention one of the target dates.

    This optimizes time-series generation by only using files that belong to
    the dates for which new dataset files were fetched.

    :param source_files: list of filenames to filter
    :param target_dates: list of date substrings to use as a filter
    :return: filenames containing at least one target date, in input order
    """
    return [
        filename
        for filename in source_files
        if any(date in filename for date in target_dates)
    ]
def cnf_get_sign(literal):
    """Return ``False`` when the CNF *literal* is negated (non-positive int),
    ``True`` otherwise."""
    return not literal <= 0
import random
def should_sample_as_per_zipkin_tracing_percent(tracing_percent):
    """Randomly decide whether the current request should be traced.

    :param tracing_percent: sampling rate between 0.0 and 100.0
    :type tracing_percent: float
    :returns: True roughly *tracing_percent* percent of the time.
    """
    draw = random.random() * 100
    return draw < tracing_percent
def unsupported(function):
    """Decorator marking a player *function* as unsupported; returns it unchanged."""
    setattr(function, '__is_supported__', False)
    return function
def play(sim, l_pol, r_pol, n_episodes = 100):
"""Run n_episodes episodes of Pong using the policies l_pol and r_pol"""
try:
for i in range(n_episodes):
while not sim.done:
state = sim.get_state()
l_a = l_pol.get_action(state)
r_a = r_pol.get_action(state)
sim.step(l_a, r_a)
sim.new_episode()
except KeyboardInterrupt:
pass
return sim.score | cc0813ae99e0d8dd056ac79b54a1603df300c980 | 117,685 |
import tokenize
def whitespace_before_inline_comment(logical_line, tokens):
    """
    Separate inline comments by at least two spaces.

    An inline comment is a comment on the same line as a statement. Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.

    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  #  Increment x
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.NL:
            continue
        if token_type == tokenize.COMMENT:
            # Only whitespace before the '#': that's a block comment, not an
            # inline one -- skip it.
            if not line[:start[1]].strip():
                continue
            # E261: fewer than two spaces between the statement and the '#'.
            if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                return (prev_end,
                        "E261 at least two spaces before inline comment")
            # E262: the comment must start with exactly "# ".  Flag both more
            # than one space after '#' ('#  ') and a missing space.
            # (The previous condition tested startswith('# '), which flagged
            # every well-formed inline comment; the correct prefix is '#  '.)
            if (len(text) > 1 and text.startswith('#  ')
                    or not text.startswith('# ')):
                return start, "E262 inline comment should start with '# '"
        else:
            prev_end = end
import re
def embed_url(video_url):
    """
    Convert a YouTube watch/share URL to its embed form.

    Handles links such as
    http(s)://www.youtube.com/watch?v=xxxxxxxxxxx or http(s)://youtu.be/xxxxxxxxxxx
    and rewrites them to https://www.youtube.com/embed/xxxxxxxxxxxx.
    """
    # "https?" (not just "https"): the previous pattern left an "http://"
    # prefix unmatched, producing "http://https://www.youtube.com/embed/...".
    regex = r"(?:https?:\/\/)?(?:www\.)?(?:youtube\.com|youtu\.be)\/(?:watch\?v=)?(.+)"
    return re.sub(regex, r"https://www.youtube.com/embed/\1", video_url)
def patch_pandas_dataframe_to_csv(mocker):
    """Replace ``pandas.DataFrame.to_csv`` with a mock and return the mock."""
    to_csv_mock = mocker.patch("pandas.DataFrame.to_csv")
    return to_csv_mock
def remove_stopwords_f(x, stopwords):
    """
    Return all strings in a Pandas Series *x* with words in *stopwords* removed.

    :param x: Series (any object exposing ``.apply``) of whitespace-delimited
        strings.
    :param stopwords: iterable of words to drop from every string.
    :return: whatever ``x.apply`` returns, with stopwords removed from each
        element.
    """
    # Build the lookup set once; the previous version rebuilt set(stopwords)
    # for every single word, which is accidentally quadratic.
    stopword_set = set(stopwords)

    def rm_words(text):
        kept = [word for word in text.split() if word not in stopword_set]
        return ' '.join(kept)

    return x.apply(rm_words)
from typing import Counter
def majority_vote(neighbour_labels, _ = None):
    """Return the most frequent label in *neighbour_labels*.

    Ties are resolved in favour of the label encountered first.
    """
    tally = Counter(neighbour_labels)
    top_pair = tally.most_common(1)[0]
    return top_pair[0]
from typing import OrderedDict
def _as_dict(parser):
"""
Go over all sections in the parser,
convert's there's key/value as dictionary that
for every section in the parser has dictionary
with key/value pairs (both str).
:param parser: (self)
:return: dict with key/value as str
"""
d = OrderedDict()
for section in parser.sections():
d[section] = OrderedDict()
for key in parser.options(section):
d[section][key] = parser.get(section, key)
return d | 105fe22f1237f92a9b542af96c41d7a93c75bc43 | 117,695 |
def replace_back_reference(
    back_reference_match_object,
    alternatives_combination
):
    """
    Resolve a back-reference match to the alternative it points at.

    The match object's named group ``group_index`` holds a 1-based index
    into *alternatives_combination*.
    """
    index_text = back_reference_match_object.group('group_index')
    return alternatives_combination[int(index_text) - 1]
def listbox_width(items, max_width=30):
    """Width for a listbox: length of the longest item, capped at *max_width*.

    listbox_width(["foo", "bar", "asdf"], 10)      #=> 4 ("asdf")
    listbox_width(["foo", "asdf", "beep boop"], 5) #=> 5 (max_width)
    """
    longest = max(len(item) for item in items)
    return longest if longest < max_width else max_width
import click
def fetch_config(ctx):
    """Fetch the loaded config object from the click context.

    Raises click.BadParameter against the parent command's ``config_file``
    option when no configuration was stored on the context.
    """
    config = ctx.obj and ctx.obj.get('config', None)
    if config:
        return config
    parent_params = dict((opt.name, opt) for opt in ctx.parent.command.params)
    raise click.BadParameter('Must specify configuration file',
                             ctx=ctx.parent,
                             param=parent_params['config_file'])
def get_ls_header() -> str:
    """Build the column header line for the Note Ls command.

    :returns: Header string with ID / Title / Tags columns.
    """
    # Column widths: "ID" padded to 33, "| Title" padded to 43, then tags.
    return "ID".ljust(33) + "| Title".ljust(43) + "| Tags\n"
import random
def split(dataset, ratio):
    """
    Randomly partition *dataset* rows into a training set and a test set.

    Columns 2-3 of each row are coerced to float and column 4 to int before
    the row is assigned (the rows in *dataset* are mutated in place).

    :param dataset: rows to split; each row needs at least 5 fields.
    :param ratio: probability that a given row lands in the training set.
    :return: training set, test set
    """
    # NOTE(review): the final row of *dataset* is never assigned to either
    # split (range stops at len - 1) -- confirm this is intentional, e.g.
    # to drop a trailing blank CSV row.
    training_set = []
    test_set = []
    for index in range(len(dataset) - 1):
        row = dataset[index]
        # Coerce the string fields read from CSV into numbers.
        row[2] = float(row[2])
        row[3] = float(row[3])
        row[4] = int(row[4])
        # Assign the row to a split at random.
        destination = training_set if random.random() < ratio else test_set
        destination.append(row)
    return training_set, test_set
def create_unique_column_name(df_cols, col_name_prefix):
    """
    Return a column name prefixed by *col_name_prefix* that does not appear
    in *df_cols*.

    Numeric suffixes are tried in order (prefix0, prefix1, ...) until an
    unused name is found.
    """
    index = 0
    while True:
        candidate = f"{col_name_prefix}{index}"
        if candidate not in df_cols:
            return candidate
        index += 1
def strip_unwanted(data_str):
    """
    Remove unwanted characters from a table data string.

    Currently the only unwanted character is the comma.
    """
    return data_str.translate({ord(","): None})
import re
def match_cont(patten, cont):
    """
    Regular-expression match at the start of the content.

    :param patten: regular expression to apply
    :param cont: content to match against
    :return: True if *patten* matches at the beginning of *cont*, else False
    """
    return re.match(patten, cont) is not None
def get_rect_ymax(data):
    """Return the largest y value among the four (x, y) vertices in *data*."""
    return max(data[index][1] for index in range(4))
def get_form(form, request, *args, **kwargs):
    """
    Instantiate *form*, binding POST data when the request is a POST.

    :param form: Form type to instantiate.
    :param request: Request whose method and POST data are inspected.
    :param args: Extra positional arguments for the form constructor.
    :param kwargs: Extra keyword arguments for the form constructor.
    :return: form instance.
    """
    if request.method != "POST":
        return form(*args, **kwargs)
    return form(request.POST, *args, **kwargs)
def common_elements(left, right) -> int:
    """Return the number of distinct elements shared by *left* and *right*."""
    shared = set(left).intersection(right)
    return len(shared)
def tick_classes(request):
    """
    Fixture for Tick based datetime offsets available for a time series.

    Simply hands back the parametrized value.
    """
    offset_class = request.param
    return offset_class
import csv
def read_csv(filename):
    """Load the (number, squared) integer columns from *filename*.

    The first row is treated as a header and skipped; the two lists feed a
    scatter plot.
    """
    numbers = []
    squared = []
    with open(filename) as handle:
        records = csv.reader(handle)
        next(records)  # skip the header row
        for record in records:
            numbers.append(int(record[0]))
            squared.append(int(record[1]))
    return numbers, squared
def get_resource_creators(resource):
    """Concatenate the sort names of every 'creator' agent on a resource record.

    Args:
        resource (dict): resource record data.

    Returns:
        str: comma-separated list of resource creators (empty string if none).
    """
    linked = resource.get("linked_agents") or []
    names = [
        agent.get("_resolved").get("display_name").get("sort_name")
        for agent in linked
        if agent.get("role") == "creator"
    ]
    return ", ".join(names)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.