content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import functools
import warnings
def deprecated(fn):
    """Decorator that marks a function as deprecated.

    Emits a ``DeprecationWarning`` pointing at the wrapped function's
    definition site each time the function is called.

    Usage:
        @deprecated
        def my_func():
            pass
    """
    @functools.wraps(fn)
    def wrapper(*a, **kw):
        msg = 'Call to deprecated function {}'.format(fn.__name__)
        # ``fn.func_code`` was the Python 2 spelling and raises
        # AttributeError on Python 3; the code object is ``fn.__code__``.
        warnings.warn_explicit(msg,
                               category=DeprecationWarning,
                               filename=fn.__code__.co_filename,
                               lineno=fn.__code__.co_firstlineno + 1)
        return fn(*a, **kw)
    return wrapper
|
1597d515eb5ea5ccabd1f4caeba4c89be6cbd65c
| 89,188
|
def _firstspin(bands):
"""Get only the bands for the first spin if multiple are contained."""
if bands.ndim not in [2, 3]:
raise ValueError('invalid input')
if bands.ndim == 3:
bands = bands[0]
return bands
|
d412c864cc15ad0e4a3b357c085a0acb010b5e45
| 89,191
|
def remap_interval(val, min1, max1, min2, max2):
    """Linearly map ``val`` from the range [min1, max1] onto [min2, max2].

    - val: the value to remap
    - min1/max1: bounds of the old range ``val`` lies within
    - min2/max2: bounds of the new range

    >>> remap_interval(0.5, 0, 1, 0, 10)
    5.0
    >>> remap_interval(5, 4, 6, 0, 2)
    1.0
    >>> remap_interval(5, 4, 6, 1, 2)
    1.5
    >>> remap_interval(0, -1, 1, 0, 255)
    127.5
    """
    # Scale the offset into the old range by the ratio of range widths,
    # then shift into the new range.
    scale = (max2 - min2) / (max1 - min1)
    return scale * (val - min1) + min2
|
d737a5e6735a4257e0eafed7ec7c6bf5be75acdc
| 89,192
|
import tokenize
import token
def _signature_strip_non_python_syntax(signature):
    """
    Private helper function. Takes a signature in Argument Clinic's
    extended signature format.

    Returns a tuple of three things:
      * that signature re-rendered in standard Python syntax,
      * the index of the "self" parameter (generally 0), or None if
        the function does not have a "self" parameter, and
      * the index of the last "positional only" parameter,
        or None if the signature has no positional-only parameters.
    """
    if not signature:
        return signature, None, None
    self_parameter = None
    last_positional_only = None
    # tokenize.tokenize wants a bytes-producing readline callable.
    lines = [l.encode('ascii') for l in signature.split('\n')]
    generator = iter(lines).__next__
    token_stream = tokenize.tokenize(generator)
    # Commas are emitted lazily (delayed_comma) so no trailing ", " is
    # added before the closing parenthesis; skip_next_comma swallows the
    # comma that follows the positional-only marker '/'.
    delayed_comma = False
    skip_next_comma = False
    text = []
    add = text.append
    current_parameter = 0
    OP = token.OP
    ERRORTOKEN = token.ERRORTOKEN
    # token stream always starts with ENCODING token, skip it
    t = next(token_stream)
    assert t.type == tokenize.ENCODING
    for t in token_stream:
        type, string = t.type, t.string
        if type == OP:
            if string == ',':
                if skip_next_comma:
                    skip_next_comma = False
                else:
                    assert not delayed_comma
                    delayed_comma = True
                    current_parameter += 1
                continue
            if string == '/':
                # '/' ends the positional-only parameters and is stripped
                # from the rendered signature.
                assert not skip_next_comma
                assert last_positional_only is None
                skip_next_comma = True
                last_positional_only = current_parameter - 1
                continue
        if (type == ERRORTOKEN) and (string == '$'):
            # '$' (an error token in normal Python) flags the "self"
            # parameter in Argument Clinic's extended syntax.
            assert self_parameter is None
            self_parameter = current_parameter
            continue
        if delayed_comma:
            delayed_comma = False
            if not ((type == OP) and (string == ')')):
                add(', ')
        add(string)
        if (string == ','):
            add(' ')
    clean_signature = ''.join(text)
    return clean_signature, self_parameter, last_positional_only
|
a20ccd8a9b929236677c9ff038463c5aa5e0743c
| 89,197
|
from typing import Tuple
from typing import List
import re
def get_ranges_and_departures(
    header: str,
) -> Tuple[List[List[Tuple[int, int]]], List[int]]:
    """Parse the rules header of the incoming data.

    Each line is formatted as ``name: #-#[ or #-# [...]]``.

    Returns:
        A pair ``(ranges, departure_lines)`` where ``ranges`` is a list of
        ``[(low, high), ...]`` per line, and ``departure_lines`` holds the
        indices of lines starting with the word 'departure' (for Part 2).
    """
    all_ranges: List[List[Tuple[int, int]]] = []
    departure_rows: List[int] = []
    for row, text in enumerate(header.split("\n")):
        if text.startswith("departure"):
            departure_rows.append(row)
        numbers = [int(m) for m in re.findall(r"(\d+)", text)]
        # Digits come in (low, high) pairs along the line.
        all_ranges.append(list(zip(numbers[::2], numbers[1::2])))
    return all_ranges, departure_rows
|
ac98b3a7b38d092f1e7da95664d666fe3f34377a
| 89,198
|
def energy_photon(frequency, wavelength=None):
    """Calculate the energy of a photon in joules.

    The original implementation ignored ``frequency`` entirely and always
    computed ``E = h*c / wavelength``.  Both quantities are now honoured,
    and existing two-argument callers get the identical result:

    - wavelength given (metres): E = h * c / wavelength
    - wavelength is None:        E = h * frequency

    Args:
        frequency: Photon frequency in Hz (used only when ``wavelength``
            is None).
        wavelength: Photon wavelength in metres, or None.

    Returns:
        Photon energy in joules.
    """
    PLANCK = 6.62607004e-34   # Planck constant, J*s
    LIGHT_SPEED = 3e+8        # speed of light, m/s (value kept from original)
    if wavelength is not None:
        return (PLANCK * LIGHT_SPEED) / wavelength
    return PLANCK * frequency
|
919eee4a39d04805f61d67f97c37a77da6308817
| 89,202
|
def sum_of_numbers(list_of_numbers):
    """Return the total of all numbers in ``list_of_numbers``."""
    total = 0
    for number in list_of_numbers:
        total += number
    return total
|
aa2b1e54b79d3082541ec64ff7c82077bcef1f6e
| 89,204
|
import json
def json_dumps_compact(data): # noqa: CCR001
    """Format provided dictionary into compact JSON. Lists will be in one line rather than split on new lines.

    Args:
        data: JSON-serializable dictionary

    Returns:
        str: JSON-formatted string with lists compacted into a single line
    """
    clean_data = {}
    # Check each key/value pair to determine if any intermediary strings are needed for later formatting
    for key, raw in data.items():
        # PLANNED: Convert to FP and recursive calls?
        if isinstance(raw, list):
            # String items are wrapped in the `` sentinel so they can be
            # turned back into double-quoted JSON strings below.
            values = [f'``{value}``' if isinstance(value, str) else value for value in raw]
            clean_data[key] = '[' + ','.join(map(str, values)) + ']'
        else:
            clean_data[key] = raw
    # Format the dictionary into JSON and replace the special characters used as intermediaries
    # NOTE(review): these textual replacements are fragile — values that
    # legitimately contain `]"`, backticks or single quotes would be
    # corrupted. Presumably inputs are simple config dicts; confirm before
    # reusing on arbitrary data.
    raw_json = json.dumps(clean_data, indent=4, separators=(',', ': '), sort_keys=True)
    return (
        raw_json
        .replace(': "[', ': [')
        .replace(']"', ']')
        .replace('``', '"')
        .replace("'", '"')
    )
|
704dae2c0de216453e1d053be749514d5487859c
| 89,206
|
import json
def load_session_data(input_file_name):
    """Read ``input_file_name`` (a JSON file) and return the parsed session data."""
    with open(input_file_name, "r") as infile:
        return json.load(infile)
|
57b089497638a50cdd2ea4a05515d385a2de1c6b
| 89,210
|
def cube(x):
    """Return ``x`` raised to the third power."""
    return x ** 3
|
b0b72d8cffb5195ecd1011fd0f1fc9eac68c2141
| 89,211
|
def splitter(value):
    """
    Conversion routine for lists. Accepts comma-separated strings.

    :param str value: The value to convert.
    :returns: A list of strings with surrounding whitespace removed.
    :rtype: ``list``
    """
    result = []
    for chunk in value.split(','):
        result.append(chunk.strip())
    return result
|
5b1fc2a62cb6167d26cd3dc3aea513615a85500f
| 89,212
|
def _round(value):
""" round redefinition for using the same format consistently. """
return round(value, 1)
|
4a41164724490418c12b21aa3b79e4e587933ef8
| 89,215
|
def floatToString3(f: float) -> str:
    """Format *f* with at most three decimal places, dropping trailing
    zeros and a dangling decimal point.

    Intended for places where three decimals are enough, e.g. node
    positions.
    """
    text = format(f, ".3f")
    return text.rstrip("0").rstrip(".")
|
085042f5d61310343e4e73c76bcb5a8ddb626159
| 89,221
|
import re
def strip_ansi(text):
    """
    Remove all ANSI SGR (color/style) escape codes from ``text``.

    The previous pattern ``\\x1b\\[(\\d|;)+m`` required at least one
    digit/semicolon and therefore missed the bare reset sequence
    ``\\x1b[m``; ``[0-9;]*`` also matches it.
    """
    return re.sub(r"\x1b\[[0-9;]*m", "", text)
|
4d651d98fd55faffedb0ec1a102ad721fffe51e8
| 89,225
|
def bytes_to_human_readable_repr(b: bytes) -> str:
    """Render ``b`` the way a bytes literal looks, minus the ``b'...'``
    wrapper; unprintable bytes such as the nul byte stay escaped.

    >>> b = bytes([102, 111, 111, 10, 0])
    >>> s = bytes_to_human_readable_repr(b)
    >>> print(s)
    foo\n\x00
    >>> print(repr(s))
    'foo\\n\\x00'
    """
    literal = repr(b)
    # repr(b) is "b'...'": drop the leading b-quote and the trailing quote.
    return literal[2:-1]
|
76616d213ac878fc6270dcaecdb70012160721a9
| 89,228
|
def has_access(user, asys):
    """Returns true, if the user has access to the configuration of the given AS."""
    if user.is_superuser:
        return user.is_superuser
    return asys.owner.users.filter(id=user.id).exists()
|
75521487b69c67bbe5f318d1efe42c4a9ad53409
| 89,229
|
def text_urldata(urldata):
    """
    Convert ``urldata`` into text.

    :param urldata: a dictionary mapping bytes to lists of bytes.
    :return: a dictionary mapping text to lists of text.
    """
    decoded = {}
    for key, values in urldata.items():
        decoded[key.decode("utf-8")] = [item.decode("utf-8") for item in values]
    return decoded
|
5c0f754e71a96ffc5f4ead09af3a4a5b812d37a0
| 89,236
|
def parse_reaction(line):
    """
    Takes a string declaring a new reaction in an Avida environment file and
    returns the name of the associated task (the third whitespace-separated
    field).
    """
    fields = line.split()
    task_name = fields[2]
    return task_name.strip()
|
9f364da02d921f171c924884624b3377d4b66438
| 89,241
|
def make_entry_idx(name='Catalan', scriptable='yes', spell='yes'):
    """
    Build an ``<idx:entry>`` opening tag with the given attributes.

    Args:
        name (str), scriptable (str), spell (str): <idx:entry> attributes

    Returns:
        <idx:entry> tag with attributes.

    Notes:
        A sequential 'id' attribute will be added separately to <idx:entry>.
    """
    return f'<idx:entry name="{name}" scriptable="{scriptable}" spell="{spell}">'
|
79b662b8c259485161544b213bcae0e1e38486d9
| 89,244
|
def parse_busco_result(path_to_busco_result):
    """Parse a BUSCO result file.

    Args:
        path_to_busco_result (str): Path to the BUSCO output file.

    Returns:
        dict: Parsed busco report.
        For example::

            { "complete_single": 778,
              "complete_duplicate": 1,
              "fragmented": 1,
              "missing": 1,
              "total": 781,
            }

    Raises:
        Exception: When the BUSCO output file is empty.

    Fixes over the original: the file handle is closed via ``with`` (it
    leaked before), and the ``busco_result`` name is no longer shadowed by
    both the file handle and the result dict.
    """
    with open(path_to_busco_result, 'r') as busco_file:
        lines = busco_file.readlines()
    if not lines:
        raise Exception("BUSCO output file has no contents.")
    # The counts live on fixed lines of the report as "<label>\t<count>...".
    fields = {
        'complete_single': 10,
        'complete_duplicate': 11,
        'fragmented': 12,
        'missing': 13,
        'total': 14,
    }
    return {key: int(lines[row].split("\t")[1]) for key, row in fields.items()}
|
4973cf77812100d23e990f9bf88bedbe7472082d
| 89,246
|
def calc_median(nums):
    """Return the median of a list of numbers.

    Fixes over the original:
      * odd-length lists returned the element at index N//2 - 1 instead of
        the true middle element (e.g. calc_median([1, 2, 3]) gave 1);
      * the input list was sorted in place as a side effect — it is now
        left untouched.

    For an even count the median is the mean of the two middle values.

    Raises:
        ValueError: if ``nums`` is empty.
    """
    if not nums:
        raise ValueError("calc_median() arg is an empty list")
    ordered = sorted(nums)
    n = len(ordered)
    mid = n // 2
    if n % 2 == 0:
        return (ordered[mid - 1] + ordered[mid]) / 2
    return ordered[mid]
|
3755d4436fe66c5e56d5297c2e2b2e59763caa90
| 89,252
|
def toDictWithShieldsKeys(count, color, logoName, style):
    """Creates a Python dictionary with all of the necessary
    keys for a Shields endpoint.

    Keyword arguments:
    count - The count of the number of repositories using an action.
    color - The color for the data portion of the badge.
    logoName - A named logo or None for no logo in badge.
    style - The name of the shields style to use or None for Shields default.

    The ``!= None`` comparisons were replaced with the PEP 8 idiom
    ``is not None``.
    """
    d = {
        "schemaVersion": 1,
        "label": "used by",
        "message": count,
        "color": color
    }
    if logoName is not None:
        d["namedLogo"] = logoName
        # The GitHub Actions logo in its default color
        # is difficult to see on top of the grey left side,
        # so if that is the logo set its color to white.
        if logoName == "githubactions":
            d["logoColor"] = "#fff"
    if style is not None:
        d["style"] = style
    return d
|
45e530bfc00e881558c88a21cb29ed0ef8ab928e
| 89,254
|
def find_range(array, win):
    """Locate the indices where the window limits fall inside ``array``.

    Args:
        array: <list> an array of values sorted in descending order
        win: <tuple> window ranges

    Returns:
        (i0, i1): indices of the first elements that are <= win[0] and
        <= win[1] respectively; None when the limit is never reached.
    """
    first = None
    second = None
    for index, value in enumerate(array):
        if first is None and value <= win[0]:
            first = index
        if second is None and value <= win[1]:
            second = index
    return first, second
|
ffd5e5c944dd4e0ec0c6f9e525e679449c7d1c25
| 89,257
|
def hhmm(time):
    """
    Textual representation of ``time`` in format HH:MM; empty string for
    a falsy value.
    """
    if not time:
        return ''
    return time.strftime("%H:%M")
|
bafaa88605f4da3ec4e656e270ff485734b26760
| 89,258
|
def is_ann_profile(profile):
    """Check if a profile string is for DM annihilation."""
    suffix = profile.split('_')[-1]
    return suffix in ('point', 'map', 'radial')
|
884aa10b8e3bce626fd1ed18b3b580531f347d9e
| 89,266
|
def dict_delta(dict_a, dict_b):
    """
    Recursively compare two dictionaries and return the dictionary of
    differences (conceptually ``dict_b - dict_a``).
    """
    delta = dict()
    for key, new_value in dict_b.items():
        if key not in dict_a:
            # Key only exists in dict_b: always part of the delta.
            delta[key] = new_value
            continue
        old_value = dict_a[key]
        if isinstance(old_value, dict) and isinstance(new_value, dict):
            nested = dict_delta(old_value, new_value)
            if len(nested) > 0:
                delta[key] = nested
        elif old_value != new_value:
            delta[key] = new_value
    return delta
|
7366eeb10515eadb6e7e412230179dff0a1e09c6
| 89,272
|
def create_sparse_dataset(py_obj, h_group, name, **kwargs):
    """Dump a scipy sparse array to an h5py file.

    Parameters
    ----------
    py_obj (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix, scipy.sparse.bsr_matrix):
        python object to dump
    h_group (h5.File.group):
        group to dump data into.
    name (str):
        the name of the resulting dataset
    kwargs (dict):
        keyword arguments to be passed to create_dataset function

    Returns:
        Group and tuple of subitems to dump into
    """
    sparse_group = h_group.create_group(name)
    subitems = tuple(
        (attr_name, attr_value, {}, kwargs)
        for attr_name, attr_value in (
            ('data', py_obj.data),
            ('indices', py_obj.indices),
            ('indptr', py_obj.indptr),
            ('shape', py_obj.shape),
        )
    )
    return sparse_group, subitems
|
69e48d8b577334c63141c726dc44466ec6eb107c
| 89,275
|
import hashlib
def get_digest(content):
    """Get the MD5 hash of ``content`` (bytes), suitable for use as the etag."""
    digest = hashlib.md5(content).hexdigest()
    return str(digest)
|
62e2cb345662aaa39547c9a55c8e38603d2f5186
| 89,281
|
def quickSort(alist):
    """
    In-place quick sort; returns the (sorted) input list.
    """
    def _partition(items, lo, hi):
        # Lomuto partition with items[hi - 1] as the pivot.
        pivot = items[hi - 1]
        boundary = lo - 1
        for cursor in range(lo, hi):
            if items[cursor] < pivot:
                boundary += 1
                items[boundary], items[cursor] = items[cursor], items[boundary]
        # Move the pivot into its final slot only when it is out of place.
        if items[hi - 1] < items[boundary + 1]:
            items[boundary + 1], items[hi - 1] = items[hi - 1], items[boundary + 1]
        return boundary + 1

    def _sort(items, lo, hi):
        if lo >= hi:
            return
        split = _partition(items, lo, hi)
        _sort(items, lo, split)
        _sort(items, split + 1, hi)

    _sort(alist, 0, len(alist))
    return alist
|
872f15cd70dd48e3b1550c648973225e4975d7e0
| 89,283
|
def maxBit(int_val):
    """Return the power of 2 of the highest bit set in ``int_val``.

    Equivalent to ``floor(log2(int_val))`` for positive inputs; returns -1
    for 0 (matching the original loop).  Uses ``int.bit_length`` instead
    of the original manual shift loop, which also kept a dead ``count``
    variable and never terminated for negative inputs.
    """
    return int_val.bit_length() - 1
|
77062fa127f8217079e6e4b716df90147ff8405f
| 89,284
|
def meraki_get_radio_settings(meraki):
    """Query the Meraki API for the current radio settings of the device
    identified by ``meraki.params["serial"]``."""
    serial = meraki.params["serial"]
    path = meraki.construct_path("get_one", custom={"serial": serial})
    return meraki.request(path, method="GET")
|
b17e707ceaf645722f5697554433a50d4eeb805a
| 89,285
|
def instance_name(msg_name, id):
    """Generate the name of an instance from a msg name."""
    prefix = "_" + msg_name.lower()
    return prefix + str(id)
|
55a68feb83fb9160dac60372d887edd77c485606
| 89,288
|
def logistic_est_loss_deriv(y_est, y):
    """
    Derivative of the logistic loss with respect to the logistic
    function's input *u*, expressed purely in terms of the logistic
    output.

    Parameters
    ----------
    y_est: float
        Estimated output of the logistic function
    y: float
        Target value

    Returns
    -------
    float:
        d(loss)/du, which simplifies to ``y_est - y``.
    """
    residual = y_est - y
    return residual
|
6a0682db00763097f444f6b258f2de767a9bc9b0
| 89,291
|
import requests
import json
def get_weather(city_name, weather_api):
    """
    given a city name and api key for Open Weather Map, return local weather conditions

    :param city_name: name of the city
    :type city_name: str
    :param weather_api: Open Weather Map API
    :type weather_api: str
    :return: results dictionary with keys 'humidity', 'temp', 'temp_max',
        'temp_min' (Celsius, rounded to one decimal) and 'weather'
    """
    response = requests.get(
        "https://community-open-weather-map.p.rapidapi.com/weather?mode=json&q={}".format(city_name),
        headers={
            "X-RapidAPI-Host": "community-open-weather-map.p.rapidapi.com",
            "X-RapidAPI-Key": weather_api
        })
    # NOTE(review): replacing ' with " before json.loads will corrupt any
    # payload that legitimately contains apostrophes; response.json()
    # would be the robust choice — confirm why the replacement was needed
    # before changing it.
    content = json.loads(response.content.decode('utf8').replace("'", '"'))
    res_dict = dict()
    res_dict['humidity'] = content['main']['humidity']
    # The API reports temperatures in Kelvin; convert to Celsius.
    res_dict['temp'] = round((content['main']['temp'] - 273.15), 1)
    res_dict['temp_max'] = round((content['main']['temp_max'] - 273.15), 1)
    res_dict['temp_min'] = round((content['main']['temp_min'] - 273.15), 1)
    res_dict['weather'] = content['weather'][0]['main']
    return res_dict
|
9cbe5e17120b03fbefa79c79beff571a8be55360
| 89,292
|
def get_hardware_info(butler, run_num):
    """Return the hardware type and hardware id for a given run.

    Parameters
    ----------
    butler : `Butler`
        The data Butler
    run_num : `str`
        The run number we are reading

    Returns
    -------
    htype : `str`
        The hardware type, either 'LCA-10134' (aka full camera) or
        'LCA-11021' (single raft)
    hid : `str`
        The hardware id, e.g., RMT-004
    """
    rafts = butler.queryMetadata('raw', 'raftname', dict(run=run_num))
    # More than one raft implies the full camera; otherwise a single raft.
    if len(rafts) > 1:
        return ('LCA-10134', 'LCA-10134-0001')
    return ('LCA-11021', rafts[0])
|
e2b44329590c899e05cb687049ef633811f317c8
| 89,296
|
import re
def get_tensor_names(string_obj):
    """
    Get tensor names from the given string.

    :param string_obj: a string representing an object from gc.get_objects()
    :return: the list of tensor variable names found

    >>> names = get_tensor_names(log2)
    >>> assert len(names) == 3
    >>> assert names[0] == "tensor1"
    >>> assert ['tensor1', 'tensor2', 'tensor3'] == names
    >>> names = get_tensor_names(log1)
    >>> assert len(names) == 1
    >>> assert ["tensor1"] == names
    """
    # There can be more than a single tensor name in the string.
    # This pattern does not overlap in the object from gc.
    # Raw string: the original non-raw literal relied on Python preserving
    # the invalid escapes \w and \( verbatim, which raises SyntaxWarning
    # and will eventually become a SyntaxError.
    pattern = r"'(\w+)': tensor\("
    return re.findall(pattern, string_obj)
|
d7b036aa2545cf16d088c29a724f4d1cebab56b1
| 89,297
|
def format_to_time_range(begin_hour, begin_minute, end_hour, end_minute):
    """Format begin/end hours and minutes into a time-range string.

    The result is formatted as ``{:02}:{:02} - {:02}:{:02}``,
    e.g. ``07:22 - 22:17``.  Missing minutes default to 0; if either hour
    is missing, None is returned.

    Bug fix: the original truthiness test (``if begin_hour and end_hour``)
    wrongly rejected hour 0 (midnight); explicit ``is not None`` checks
    accept it.

    Args:
        begin_hour(int): starting hour, e.g. 7
        begin_minute(int): starting minute, e.g. 22
        end_hour(int): ending hour, e.g. 12
        end_minute(int): ending minute, e.g. 45

    Returns:
        time_range(str): the formatted range, e.g. '07:22 - 22:17',
        or None when an hour is missing.
    """
    TIME_FORMAT = "{:02d}:{:02d}"
    if begin_hour is None or end_hour is None:
        return None
    # ``minute or 0`` maps both None and 0 to 0, as the original did.
    begin_biz_time = TIME_FORMAT.format(begin_hour, begin_minute or 0)
    end_biz_time = TIME_FORMAT.format(end_hour, end_minute or 0)
    return '{} - {}'.format(begin_biz_time, end_biz_time)
|
150d02f8ea638ed697b932738844ec0d0ab1c0a6
| 89,299
|
import pathlib
import io
def source(request, filepath_pseudos):
    """Return a pseudopotential, either as ``str``, ``Path`` or ``io.BytesIO``."""
    pseudo_path = pathlib.Path(filepath_pseudos()) / 'Ar.upf'
    requested_type = request.param
    if requested_type is str:
        return str(pseudo_path)
    if requested_type is pathlib.Path:
        return pseudo_path
    return io.BytesIO(pseudo_path.read_bytes())
|
593ede3627b2e43872a94f1079ab478d9f8133a1
| 89,300
|
import uuid
def generate_page_uuid(url: str) -> str:
    """Generate a consistent UUID based on the MD5 hash of a URL."""
    page_uuid = uuid.uuid3(uuid.NAMESPACE_URL, url)
    return str(page_uuid)
|
3f975766a42833f90324e885210cc08b3ad706a8
| 89,305
|
def compute_ks_for_conv2d(w_in: int, w_out: int, padding: int = 1) -> int:
    """Kernel size for a stride-1 conv2d that shrinks the spatial width,
    i.e. ``w_out < w_in``.

    Inverts ``w_out = floor(w_in - k + 2p) + 1``, giving
    ``k = w_in - w_out + 2p + 1``.  Asserts when no valid (positive
    integer) kernel size is possible.
    """
    assert w_out - w_in < 2 * padding - 1, "No valid kernel size is possible"
    return w_in - w_out + 2 * padding + 1
|
277781ab8b98cf402ad7083126c4c9ec21201868
| 89,308
|
def parse_media_type(file_ext, EXT):
    """
    Given a file extension, get the type of media.
    One of 'photo', 'video' or 'remove'.

    file_ext {str}: file extension to parse
    EXT {Extension}: Extension object

    Returns the type of media {str}; raises when no attribute of ``EXT``
    contains the extension.
    """
    public_attrs = (n for n in dir(EXT) if not n.startswith('__'))
    for media_kind in public_attrs:
        if file_ext in getattr(EXT, media_kind):
            return media_kind
    raise Exception(f'Invalid extension: {file_ext}')
|
442a140079b73a1f484e23fb7e3317d68db5c007
| 89,310
|
def PoolVec3Array_to_list(array):
    """Return a copy of the array as a list of 3-element lists of floats.
    This is not efficient."""
    return [[element.x, element.y, element.z] for element in array]
|
21421e8fb4927a33c5c8cc2d5a4e35a91ae6e23a
| 89,312
|
from unittest.mock import call
def PostMultiLineNotice_Call( message ):
    """Return a mock.call object for a call to vimsupport.PostMultiLineNotice with
    the supplied message"""
    command = "echohl WarningMsg | echo '" + message + "' | echohl None"
    return call( command )
|
54e238913c05e1e2a32e7943794273a754d069a9
| 89,314
|
import inspect
def function_exists(module, function):
    """
    Check a Python module for the specified function.

    :param module: Python module
    :type module: ``types.ModuleType``
    :param function: Name of the Python function
    :type function: ``str``
    :return: ``bool``
    """
    if not hasattr(module, function):
        return False
    routine_names = (name for name, _ in inspect.getmembers(module, inspect.isroutine))
    return any(function in name for name in routine_names)
|
484a3700acbad78fd3138a2056babb53d7bf9283
| 89,319
|
import decimal
def remove_exponent(d: decimal.Decimal) -> decimal.Decimal:
    """
    Strip the exponent and trailing zeroes from a decimal.

    From https://docs.python.org/3/library/decimal.html#decimal-faq
    """
    if d == d.to_integral():
        return d.quantize(decimal.Decimal(1))
    return d.normalize()
|
31ac4c6ba54f4338670e64dc4bf68e8906343669
| 89,320
|
import re
def find_problem_name(contents):
    """
    Find the name of the problem by reading the first comment if it exists.
    Falls back to 'gdproblem' when no comment is found.
    """
    comment = re.search(r"(?<=#).*?\n", contents, re.DOTALL)
    if comment is None:
        return 'gdproblem'
    return comment.group().strip()
|
665eefc3265f6ef5b4547c3fb2e2dbbd2ab40fdc
| 89,321
|
def get_content_type(mimetype, charset):
    """Return the full content type string with charset for a mimetype.

    If the mimetype represents text the charset will be appended as charset
    parameter, otherwise the mimetype is returned unchanged.

    :param mimetype: the mimetype to be used as content type.
    :param charset: the charset to be appended in case it was a text mimetype.
    :return: the content type.
    """
    is_textual = (
        mimetype.startswith('text/')
        or mimetype == 'application/xml'
        or (mimetype.startswith('application/') and mimetype.endswith('+xml'))
    )
    if is_textual:
        return mimetype + '; charset=' + charset
    return mimetype
|
c1a427b6b21f5e621e5eb38113619bbe5372c1d1
| 89,322
|
def num_physical_imei_shards(conn):
    """
    Return the number of physical shards for IMEI-sharded partitions.

    Arguments:
        conn: dirbs db connection object

    Returns:
        number of physical imei shards in the schema
    """
    query = 'SELECT phys_shards FROM schema_metadata'
    with conn.cursor() as cursor:
        cursor.execute(query)
        row = cursor.fetchone()
    return row[0]
|
ef31d9bff061c006c636840c387b29ed81d96fea
| 89,323
|
def _time_auto_unit(time_max):
"""
Automatically set the unit for time axis according to the absolute maximum
of the input data. This is a separate function for ease of testing and for
use across different plots.
Parameters
----------
time_max : float
Absolute maximum of the time data in seconds
"""
if time_max == 0:
unit = 's'
elif time_max < 1e-3:
unit = 'mus'
elif time_max < 1:
unit = 'ms'
else:
unit = 's'
return unit
|
49bfcd56e6765a38da6fe88ba72e15b78554c1f8
| 89,324
|
def get_field(DataModelClass, field_name):
    """Return a SQLAlchemy Field from a field name such as 'name' or 'parent.name'.

    Returns None if no field exists by that field name.
    """
    # Flat names such as 'name' resolve directly on the class.
    if '.' not in field_name:
        return getattr(DataModelClass, field_name, None)
    # Hierarchical names such as 'parent.name' recurse through the
    # relationship to the related entity.
    relationship_name, remainder = field_name.split('.', 1)
    relationship = getattr(DataModelClass, relationship_name)
    return get_field(relationship.property.mapper.entity, remainder)
|
3052906068e2a8ead2a20f70010d623aeb164a70
| 89,325
|
def sym_has_params(sym, input_names):
    """Judge if a model has parameters.

    Args:
        sym: A Symbol instance of the model.
        input_names: A list of input tensor names.

    Return:
        True if the model has parameters, False otherwise.
    """
    # Every argument that is not a declared input is a learnable parameter.
    arguments = set(sym.list_arguments())
    parameters = arguments.difference(input_names)
    return len(parameters) > 0
|
d06d674e485950561631cb5659f455cf36fa47f8
| 89,327
|
def containsConflictMarker(text):
    """ Returns true if there is a conflict marker in the text. """
    marker = "/!\\ '''Edit conflict"
    return marker in text
|
5aa6173b02d0d6ff4c92f1b222d3b25c767e44fb
| 89,328
|
from operator import inv
def inverse(matrix):
    """ Returns the inverse of a given matrix.

    Args
    ---
    `matrix : np.array` A numpy matrix

    Returns
    ---
    `inv : np.array` The inverse matrix

    Bug fix: the original imported ``inv`` from the ``operator`` module,
    which is *bitwise* inversion (``~x``) — it raises for float arrays and
    silently flips bits for integer ones.  ``numpy.linalg.inv`` is the
    correct linear-algebra routine.
    """
    from numpy.linalg import inv as _matrix_inv
    return _matrix_inv(matrix)
|
26bb81570ad3fb158c33a4bec79a3b7ec3e6b3ba
| 89,341
|
def get_num_tablets_per_dose(dose, strength, divisions=1):
    """Calculate how many tablets to give for a dose, given tablet strength.

    Tablets can be divided into ``divisions`` sections (e.g. 4 = quarters);
    the result is rounded to the nearest allowed fraction.

    :params:
        dose (Float) - The dose in mg of what the patient should be receiving
        strength (Float) - The strength in mg of the chosen tablets.
        divisions (Int) - the number of sections you can divide your tablet into
    :returns:
        Number of tablets per dose (Float) - fixed to one of the points of the divisions

    >>> get_num_tablets_per_dose(120, 100, 2)
    1.0
    >>> get_num_tablets_per_dose(240, 100, 2)
    2.5
    #Divide into quarters
    >>> get_num_tablets_per_dose(120, 100, 4)
    1.25
    """
    raw_tablets = dose / strength
    return round(raw_tablets * divisions) / divisions
|
0824bf99af4c0dc83414e0f5550d9edf2b671ef9
| 89,342
|
import math
def penalize_time(action_log):
    """Returns penalization factor for time taken.

    The factor is the reciprocal of the natural log of the number of
    logged actions, so longer logs are penalized less steeply.

    NOTE(review): raises ValueError for an empty log (log(0)) and
    ZeroDivisionError for a single-entry log (log(1) == 0) — presumably
    callers guarantee len(action_log) >= 2; confirm.
    """
    return 1 / math.log(len(action_log))
|
2f597db5382666dd5573a07b5b7117e1755c0007
| 89,344
|
from pathlib import Path
from typing import Tuple
from typing import List
def txt_of_ids_to_list(path: Path) -> Tuple[List[str], List[str]]:
    """Read a .txt file with one ``case_id,slide_id`` pair per row and
    return two parallel lists: (case_ids, slide_ids).

    Reading stops at the first blank line, matching the original
    walrus-loop behavior (an empty stripped line ends input).
    """
    case_ids: List[str] = []
    slide_ids: List[str] = []
    with open(path, "r") as handle:
        for raw in handle:
            row = raw.rstrip()
            if not row:
                break
            case_id, slide_id = row.split(',')
            case_ids.append(case_id)
            slide_ids.append(slide_id)
    return case_ids, slide_ids
|
38d0efa3689185ed24cf7a9e5082c7040c4370f9
| 89,345
|
import uuid
def create_check_mode_user(console, create_props, update_props):
    """
    Create and return a fake local User object.

    This is used when a user needs to be created in check mode.
    """
    props = dict()
    # Fake defaults for read-only properties; pattern-based users also
    # carry a fabricated user-pattern URI.
    if create_props['type'] == 'pattern-based':
        props['user-pattern-uri'] = 'fake-uri-{0}'.format(uuid.uuid4())
    props['password-expires'] = -1
    props['user-roles'] = []
    props['replication-overwrite-possible'] = False
    props.update(create_props)
    props.update(update_props)
    oid = 'fake-{0}'.format(uuid.uuid4())
    return console.users.resource_object(oid, props=props)
|
759d7c3e41d8305b5b7cde861f2c420f44dd20ff
| 89,347
|
import re
def check_urn(urn_string: str) -> str:
    """
    Check that the urn string follows the correct pattern, normalizing a
    bare ``<major>.<minor>.<release>`` version to ``urn:CRSD:<...>``.

    Bug fix: the original patterns used unescaped dots (``\\d.\\d.\\d``),
    so any character was accepted between the digits (e.g. '1a2b3').  The
    dots are now escaped, and multi-digit version components are allowed.

    Parameters
    ----------
    urn_string : str

    Returns
    -------
    str
        The normalized ``urn:CRSD:<major>.<minor>.<release>`` string.

    Raises
    ------
    TypeError
        If the input is not a string.
    ValueError
        For a poorly formed or unmapped CRSD urn.
    """
    if not isinstance(urn_string, str):
        raise TypeError(
            'Expected a urn input of string type, got type {}'.format(type(urn_string)))
    if re.match(r'^\d+\.\d+\.\d+$', urn_string) is not None:
        urn_string = 'urn:CRSD:{}'.format(urn_string)
    if re.match(r'^urn:CRSD:\d+\.\d+\.\d+$', urn_string) is None:
        raise ValueError(
            'Input provided as `{}`,\nbut should be of the form '
            '`urn:CRSD:<major>.<minor>.<release>'.format(urn_string))
    return urn_string
|
9ba9e461eb42710ae848c6d220ea128b91a9efbf
| 89,349
|
def duplicates(iterable):
    """Return duplicated (hashable) items from iterable preserving order.

    >>> duplicates(iter([1, 2, 2, 3, 1]))
    [2, 1]
    """
    seen = set()
    repeated = []
    for item in iterable:
        if item in seen:
            repeated.append(item)
        else:
            seen.add(item)
    return repeated
|
ce7021ae22d1b740990d460c68fbab77b0e765e8
| 89,351
|
def length(*args, **kwargs) -> int:
    """
    Compute the total string length of all positional values and keyword
    values passed.

    :param args: values whose ``len`` is summed
    :param kwargs: mapping whose *values'* ``len`` is summed
    :return: the combined length
    """
    total = 0
    for item in args:
        total += len(item)
    for item in kwargs.values():
        total += len(item)
    return total
|
157c3f401e298dcd86d75009d3a61f90cb78ae8d
| 89,353
|
def doc_name(s):
    """
    Return the operator's name in a human readable format for Blender's
    operator search: strips everything up to (and including) ``_OT`` from
    the identifier and converts the remainder to title case.
    """
    tail = s.split("_OT")[-1]
    words = tail.replace("_", " ")
    return words.lstrip().title()
|
890d5121866236c7d079f8552ef500477728bfd7
| 89,361
|
def get_primality_testing_rounds(num: int) -> int:
    """Choose the iteration count for a Miller-Rabin primality test.

    Larger numbers need fewer rounds for the same confidence:
        bits >= 1300: k = 2
        bits >= 850:  k = 3
        bits >= 650:  k = 4
        bits >= 300:  k = 10
        otherwise:    k = 30

    ref : http://cacr.uwaterloo.ca/hac/about/chap4.pdf
    """
    thresholds = ((1300, 2), (850, 3), (650, 4), (300, 10))
    bits = num.bit_length()
    for min_bits, rounds in thresholds:
        if bits >= min_bits:
            return rounds
    return 30
|
fa99d1f5c6ec82d6aea16d10001db8090816f883
| 89,379
|
def fileTOdictionary(filename):
    """Convert a CSV file to a sparse-matrix nested dictionary keyed as
    ``[age][icd_column_number]``.

    The age key is column 4 of each data row; ICD columns "1".."35" map to
    the values found at columns 9..43 of the same row, e.g.::

        {"0": {"1": value, "2": value, ...}, "2.5": {...}, ...}

    Fixes over the original: the file handle is closed via ``with`` (it
    leaked before), the duplicated if/else branches are collapsed with
    ``setdefault``, and the leftover debug ``print`` of the whole
    dictionary is removed.
    """
    data_dict = {}
    with open(filename) as fh:
        next(fh)  # skip the header row
        for line in fh:
            fields = line.strip().split(",")
            age = str(fields[4])
            row = data_dict.setdefault(age, {})
            for icd_column in range(1, 36):
                # ICD data starts 8 columns to the right of the ICD index.
                row[str(icd_column)] = fields[icd_column + 8]
    return data_dict
|
07503c97e211b789b576aeddaa06b85b57aff9d8
| 89,382
|
def homogeneous_to_cartesian(homogeneous_point):
    """
    Convert Homogeneous coordinates to Cartesian coordinates by dropping
    the trailing homogeneous component of each row.

    NOTE(review): this only slices off the last column; it does NOT divide
    by the homogeneous coordinate w.  That is correct only when w == 1 for
    every point — confirm that callers normalize first.

    :param homogeneous_point: Homogeneous coordinates (2-D array, one
        point per row, homogeneous component last)
    :return: Cartesian coordinates (same points without the final column)
    """
    return homogeneous_point[:, :-1]
|
7a2e3571023d28024d8caf1f3626abb051193a32
| 89,384
|
import random
def walk(n, seed=1):
    """
    Generate a random walk of ``n`` unit steps; the initial state 0 is
    included in the returned list.

    :param n: the number of steps of the walk.
    :param seed: the seed for the random number generator. Each walk has
        an independent random number generator.
    """
    # Independent RNG so concurrent walks don't share global state.
    rng = random.Random()
    rng.seed(seed)
    positions = [0]
    for _ in range(n):
        step = -1 if rng.uniform(-1, +1) < 0 else +1
        positions.append(positions[-1] + step)
    return positions
|
082c770a3dfaa5e2d4e2aea62c78df99f5f8d099
| 89,396
|
from typing import Any
from typing import Hashable
def is_hashable(obj: Any) -> bool:
    """
    Check whether the given object is hashable.

    :param obj: The object to check.
    :return: True if the object is hashable, False if not.
    """
    hashable = isinstance(obj, Hashable)
    return hashable
|
07112787239ac673dde5ebe536458cbf8c5fb202
| 89,397
|
def tuples( relation ):
    """ Returns the entire relation as a Python list of tuples.
    """
    return list(relation)
|
ba94f250b12c8e2c781ce9086b73352025d85258
| 89,404
|
def _is_ipv6_addr_link_local(ip_addr):
    """Indicates if a given IPv6 address is link-local.

    NOTE(review): this only matches addresses written with the literal
    compressed prefix 'fe80::' (case-insensitive).  Link-local is the
    fe80::/10 block, so equivalent spellings such as 'fe80:0:0::1' are
    not recognized — confirm that inputs are always in compressed form.
    """
    return ip_addr.lower().startswith('fe80::')
|
c065b37cee7805d85ed72d341388bc5f6078f947
| 89,408
|
def _find_pos(obj) -> str:
"""
Pass in a Dataset/DataArray to find the coordinate position (rho/u/v)
of the data to be worked with.
If obj is a Dataset, 'rho' will be searched first, and then u/v.
pos = _find_pos(obj)
"""
pos = None
if 'eta_rho' in obj.dims or 'xi_rho' in obj.dims:
pos = '_rho'
elif 'eta_psi' in obj.dims or 'xi_psi' in obj.dims:
pos = '_psi'
elif 'eta_u' in obj.dims or 'xi_u' in obj.dims:
pos = '_u'
elif 'eta_v' in obj.dims or 'xi_v' in obj.dims:
pos = '_v'
if pos is None:
raise ValueError('Unknown coordinate position (rho/psi/u/v).')
return pos
|
eb3593be242122aefad0f37b619c343a26b6dca7
| 89,410
|
def ragged_to_dense(rt_input, default_value=None, shape=None):
    """Create a dense tensor from a ragged tensor.

    Thin wrapper that delegates to ``rt_input.to_tensor`` with the given
    default value and shape.
    """
    to_tensor_kwargs = {"default_value": default_value, "shape": shape}
    return rt_input.to_tensor(**to_tensor_kwargs)
|
c876cd61af62142b99d2206f410177660cd01d99
| 89,411
|
def load_from_file(path):
    """
    Load a file's content in binary mode.

    Args:
        path(str): Path of an existing file.
    Returns:
        bytes: Content of file.
    """
    # Returning from inside the with-block still closes the file.
    with open(path, 'rb') as binary_file:
        return binary_file.read()
|
0cd555334baf14064bbe52754734c200bb4b43d9
| 89,413
|
def R0(beta, d, nu, mu1):
    """
    Basic reproduction number.

    Parameters:
    -----------
    beta
        average number of adequate contacts per unit time with infectious individuals
    d
        natural death rate
    nu
        disease induced death rate
    mu1
        Maximum recovery rate
    """
    # R0 = contact rate over total removal rate from the infectious class.
    removal_rate = d + nu + mu1
    return beta / removal_rate
|
0dac9fe5f7e1a879a1afcb469ae5a8f74eff8825
| 89,415
|
def normalize_images(images):
    """
    Scale 8-bit pixel intensities into the [0, 1] range.

    :param images: array-like of pixel values in [0, 255]; must support
        element-wise division by a scalar (e.g. a NumPy array).
    :return: the input divided element-wise by 255.0.
    """
    max_pixel_value = 255.0
    return images / max_pixel_value
|
ca0e1b0539fb89dc443e5c530bda3c902368c4c7
| 89,417
|
from typing import List
def flatten_lines(lines: List[str]) -> List[str]:
"""
Combines lines that were split with line continuations
i.e.
this is \
one line
=> this is one line
"""
ret = []
n = 0
while n < len(lines):
line = lines[n]
while line.endswith("\\") and n < len(lines) - 1:
n += 1
next_line = lines[n]
line = line[:-1] + " " + next_line.strip()
ret.append(line)
n += 1
return ret
|
f2a671efc21aba010af0cc40ac335d22f02118ee
| 89,420
|
import torch
def normalize_tensor(in_feat, eps=1e-10):
    """Normalize features to unit L2 norm along the channel axis.

    Args:
        in_feat (Tensor): Tensor with shape [N, C, H, W].
        eps (float, optional): Small constant guarding against division
            by zero. Defaults to 1e-10.

    Returns:
        Tensor: Tensor of the same shape, L2-normalized per position.
    """
    channel_norm = in_feat.pow(2).sum(dim=1, keepdim=True).sqrt()
    return in_feat / (channel_norm + eps)
|
dbb5b42a848f95fe6d23bc8a06722b84fa2b4b7a
| 89,422
|
def entity_seqs_equal(expected, predicted):
    """
    Return True when the expected and predicted entity sequences match
    pairwise on span, text, and entity type; False otherwise.

    Args:
        expected (list of core.Entity): A list of the expected entities for some query
        predicted (list of core.Entity): A list of the predicted entities for some query
    """
    if len(expected) != len(predicted):
        return False
    return all(
        exp.entity.type == pred.entity.type
        and exp.span == pred.span
        and exp.text == pred.text
        for exp, pred in zip(expected, predicted)
    )
|
2060aeaeecb3e210020466437a634aa6c01914e5
| 89,423
|
def _is_scheduling_fair(enqueued_slots, dequeued_slots):
"""
We enqueued same number of requests for every slot.
Assert correct order, e.g.
>>> enqueued = ['a', 'b', 'c'] * 2
>>> correct = ['a', 'c', 'b', 'b', 'a', 'c']
>>> incorrect = ['a', 'a', 'b', 'c', 'c', 'b']
>>> _is_scheduling_fair(enqueued, correct)
True
>>> _is_scheduling_fair(enqueued, incorrect)
False
"""
if len(dequeued_slots) != len(enqueued_slots):
return False
slots_number = len(set(enqueued_slots))
for i in range(0, len(dequeued_slots), slots_number):
part = dequeued_slots[i:i + slots_number]
if len(part) != len(set(part)):
return False
return True
|
b5ad2c8c95be3da39b4348323fda29d442a9c1ad
| 89,425
|
def get_embedded_items(result_collection):
    """
    Given a result_collection (returned by a previous API call that
    returns a collection, like get_bundle_list() or search()), return a
    list of embedded items, each item being a result object.

    'result_collection' is a JSON object returned by a previous API call;
    the parameter 'embed_items' must have been True when the collection
    was originally requested. May not be None.

    Returns a list, which may be empty if no embedded items were found.
    """
    assert result_collection is not None
    embedded = result_collection.get('_embedded')
    if embedded is None:
        # Gracefully handle being passed a non-collection.
        return []
    return embedded.get('items', [])
|
e0f904ad642b3f5b114239ca3d7aa99bdbd675af
| 89,428
|
def tidyBBB(BBB: str) -> str:
    """
    Change book codes like SA1 to the conventional 1SA.

    BBB is always three characters starting with an UPPERCASE LETTER;
    codes without a trailing digit are returned unchanged.
    """
    if BBB[2].isdigit():
        # Move the trailing digit to the front: SA1 -> 1SA.
        return BBB[2] + BBB[:2]
    return BBB
|
58775330c3508f9fab49e474de945c8c213b240e
| 89,430
|
import logging
def str2ll(level):
    """Convert a textual log level (e.g. "debug") to its numeric value.

    Raises ValueError if the name is not a valid logging level.
    (Adapted from the logging HOWTO documentation.)
    """
    numeric_level = getattr(logging, level.upper(), None)
    if isinstance(numeric_level, int):
        return numeric_level
    raise ValueError('Invalid log level: %s' % level)
|
3cd140306b9362e1880404db637dd77b607b0d70
| 89,438
|
def voltage_to_direction(voltage: float) -> str:
    """
    Convert an analog voltage reading to a compass direction.

    Arguments:
        - voltage: Voltage float value from the MCP3008; values are between 0 and 3.3V
    Returns:
        - Direction corresponding to the input voltage
    """
    # Readings outside the calibrated band wrap around to North.
    if voltage < 0.20625 or voltage > 3.09375:
        return "N"
    # Exclusive upper bound of each successive 0.4125 V sector.
    sectors = (
        (0.61875, "NE"),
        (1.03125, "E"),
        (1.44375, "SE"),
        (1.85625, "S"),
        (2.26875, "SW"),
        (2.68125, "W"),
    )
    for upper_bound, direction in sectors:
        if voltage < upper_bound:
            return direction
    return "NW"
|
32cf5bd05f1d3d63a0c6aee35176be0de33dfa67
| 89,444
|
def mass_hpa_wing(
        span,
        chord,
        vehicle_mass,
        n_ribs,  # You should optimize on this, there's a trade between rib weight and LE sheeting weight!
        skin_density,
        n_wing_sections=1,  # defaults to a single-section wing (be careful: can you disassemble/transport this?)
        ultimate_load_factor=1.75,  # default taken from Daedalus design
        type="cantilevered",  # "cantilevered", "one-wire", "multi-wire"; NOTE(review): shadows the builtin `type` inside this function
        t_over_c=0.128,  # default from DAE11
        include_spar=True,
        # Should we include the mass of the spar? Useful if you want to do your own primary structure calculations.
):
    """
    Finds the mass of the wing structure of a human powered aircraft (HPA), following Juan Cruz's correlations in
    http://journals.sfu.ca/ts/index.php/ts/article/viewFile/760/718

    :param span: wing span [m]
    :param chord: wing mean chord [m]
    :param vehicle_mass: aircraft gross weight [kg]
    :param n_ribs: number of ribs in the wing
    :param skin_density: skin areal density (multiplied by panel area to get skin mass) — presumably [kg/m^2]; TODO confirm units
    :param n_wing_sections: number of wing sections or panels (for disassembly?)
    :param ultimate_load_factor: ultimate load factor [unitless]
    :param type: Type of bracing: "cantilevered", "one-wire", "multi-wire"
    :param t_over_c: wing airfoil thickness-to-chord ratio
    :param include_spar: Should we include the mass of the spar? Useful if you want to do your own primary structure calculations. [boolean]
    :return: Wing structure mass [kg]
    :raises ValueError: if `type` is not one of the three supported bracing types.
    """
    ### Primary structure
    # Spar mass correlations per bracing type; each scales with the load
    # factor relative to a nominal (ultimate_load_factor * mass / 100 = 2) case.
    if include_spar:
        if type == "cantilevered":
            mass_primary_spar = (
                    (span * 1.17e-1 + span ** 2 * 1.10e-2) *
                    (1 + (ultimate_load_factor * vehicle_mass / 100 - 2) / 4)
            )
        elif type == "one-wire":
            mass_primary_spar = (
                    (span * 3.10e-2 + span ** 2 * 7.56e-3) *
                    (1 + (ultimate_load_factor * vehicle_mass / 100 - 2) / 4)
            )
        elif type == "multi-wire":
            mass_primary_spar = (
                    (span * 1.35e-1 + span ** 2 * 1.68e-3) *
                    (1 + (ultimate_load_factor * vehicle_mass / 100 - 2) / 4)
            )
        else:
            raise ValueError("Bad input for 'type'!")
        mass_primary = mass_primary_spar * (
                11382.3 / 9222.2)  # accounts for rear spar, struts, fittings, kevlar x-bracing, and wing-fuselage mounts
    else:
        mass_primary = 0
    ### Secondary structure
    # NOTE(review): only used by the commented-out LE sheeting term W_wLE below.
    ratio_of_rib_spacing_to_chord = (span / n_ribs) / chord
    # Two extra ribs at each internal section break.
    n_end_ribs = 2 * n_wing_sections - 2
    area = span * chord
    # Rib mass
    W_wr = n_ribs * (chord ** 2 * t_over_c * 5.50e-2 + chord * 1.91e-3) * 1.3
    # x1.3 scales to estimates from structures subteam
    # Half rib mass
    W_whr = (n_ribs - 1) * skin_density * chord * 0.65 * 0.072
    # 40% of cross sectional area, same construction as skin panels
    # End rib mass
    W_wer = n_end_ribs * (chord ** 2 * t_over_c * 6.62e-1 + chord * 6.57e-3)
    # LE sheeting mass
    # W_wLE = 0.456/2 * (span ** 2 * ratio_of_rib_spacing_to_chord ** (4 / 3) / span)
    # Skin Panel Mass
    W_wsp = area * skin_density * 1.05 / 1.3  # assumed constant thickness from 0.9c around LE to 0.15c
    # 1.3 removes the correction factor included later - prevents double counting
    # TE mass
    W_wTE = span * 2.77e-2
    # Covering
    W_wc = area * 0.076 / 1.3  # 0.033 kg/m2 Tedlar covering on 2 sides, with 1.1 coverage factor
    mass_secondary = W_wr + W_wer + W_wsp + W_wTE + W_wc + W_whr
    return mass_primary + mass_secondary
|
8e55491a79729a3baa7e741daef92637ebaaf000
| 89,446
|
def annotate(items, fn, sort_fn=None, reverse=True):
    """Map each element of *items* to its value under *fn*, with the dict's
    insertion order determined by sorting on *sort_fn*.

    If *sort_fn* is omitted, *fn* itself is used as the sort key.
    Sorting is descending by default (``reverse=True``).

    Example::

        annotate([4, 3, 2, 1], fn=lambda x: x**2,
                 sort_fn=lambda x: (-2)**x, reverse=False)

    returns ``{3: 9, 1: 1, 2: 4, 4: 16}`` — odd numbers decreasing, then
    even numbers increasing.
    """
    key_fn = fn if sort_fn is None else sort_fn
    ordered_items = sorted(items, key=key_fn, reverse=reverse)
    return {item: fn(item) for item in ordered_items}
|
9713b818a3774278d5949c52b9deefb3afff8551
| 89,447
|
def _write_swift_version(repository_ctx, swiftc_path):
    """Write a file containing the current Swift version info

    This is used to encode the current version of Swift as an input for caching.
    If `swiftc -version` exits non-zero, the literal string "unknown" is
    written instead so the file always exists.

    Args:
        repository_ctx: The repository context.
        swiftc_path: The `path` to the `swiftc` executable.

    Returns:
        The name of the written file ("swift_version") containing the version info
    """
    result = repository_ctx.execute([swiftc_path, "-version"])
    contents = "unknown"
    if result.return_code == 0:
        contents = result.stdout.strip()
    filename = "swift_version"
    # Non-executable: the file is pure data consumed as a cache key.
    repository_ctx.file(filename, contents, executable = False)
    return filename
|
55eae8cc8e5aa52ed35d0f3a564372292c2cc664
| 89,449
|
def any(_):
    """Matches anything: always returns True regardless of the argument.

    NOTE(review): this intentionally shadows the builtin ``any`` in this
    module — presumably used as a catch-all predicate/matcher. Code here
    that needs the builtin must reach it via ``builtins.any``.
    """
    return True
|
ccef36982fecea96d2e8e6e8b223fee4f4a303bb
| 89,450
|
import re
def get_total_tag_counts(tag_bed_file):
    """
    Count the total tags for the current experimental run.

    :param tag_bed_file: path to a BED file; lines beginning with "track"
        are treated as headers and skipped.
    :return: number of non-header lines in the file.
    """
    counts = 0
    # Context manager guarantees the file is closed even on error
    # (the original left it open if iteration raised).
    with open(tag_bed_file, 'r') as infile:
        for line in infile:
            # Skip track header lines; startswith() replaces the
            # equivalent re.match("track", line) prefix test.
            if not line.startswith("track"):
                counts += 1
    return counts
|
05b062d496f676b36ac0634f245c8b310217fd0a
| 89,452
|
def _env(env):
"""Parse multiline KEY=VALUE string into dict."""
return dict((key.strip(), val)
for line in env.strip().splitlines()
for key, _, val in [line.partition('=')])
|
95fa1f6f2630489bc34b29000e68c18dab435f69
| 89,459
|
from typing import List
import itertools
def weights_parameters(fields: List[str], weights: List[float]) -> List[str]:
    """Generate Solr field-weight combinations for the given fields and weights.

    e.g. for ["content", "title"] and [0.1, 1.0] it generates
    ["content^0.1 title^0.1", "content^0.1 title^1.0",
     "content^1.0 title^0.1", "content^1.0 title^1.0"]
    """
    # One list of "field^weight" options per field, then the Cartesian
    # product over fields gives every combination.
    options_per_field: List[List[str]] = [
        ["{}^{}".format(field, weight) for weight in weights]
        for field in fields
    ]
    return [" ".join(parts) for parts in itertools.product(*options_per_field)]
|
03edefd58ebabea512371986413f1d62b371d421
| 89,462
|
def has_og_property(meta_tag, properties):
    """
    Check whether the given meta tag has an attribute property equal to
    og:something, with something listed in *properties*.

    :param meta_tag: mapping-like tag (e.g. a BeautifulSoup element) whose
        "property" entry holds the Open Graph property name.
    :param properties: collection of accepted property names (without the
        "og:" prefix).
    Returns:
        None if the given tag "is" not an accepted og:something;
        something (the bare name) otherwise.
    """
    try:
        prop = meta_tag["property"]
        if not prop.startswith("og:"):
            return None
    # Narrowed from a bare except: missing "property" attribute (KeyError),
    # non-subscriptable tag (TypeError), non-string value (AttributeError).
    except (KeyError, TypeError, AttributeError):
        return None
    og_name = prop[len("og:"):]  # strip the "og:" prefix
    return og_name if og_name in properties else None
|
4d15b94b1140aabb656416b4359374417eac96d2
| 89,463
|
def generate_init_belief(state_space):
    """
    Generate the initial belief distribution over states.

    Belief is uniform over all states where no machine in the network is
    compromised, and zero for every other state.
    """
    # Evaluate the predicate once per state and reuse the flags for both
    # the count and the belief assignment.
    uncompromised = [not s.has_compromised_machine() for s in state_space]
    uniform_prob = 1 / sum(uncompromised)
    return [uniform_prob if flag else 0 for flag in uncompromised]
|
79f64436e6d64c6a4d20149b47b2abc464a14d5c
| 89,467
|
def char_count(word):
    """Return a dictionary mapping each character of *word* to the number
    of times it occurs (in first-seen order).

    >>> char_count("aab")
    {'a': 2, 'b': 1}
    """
    from collections import Counter  # stdlib replaces the hand-rolled loop
    # dict() preserves Counter's insertion order, matching the original
    # loop's first-seen key order.
    return dict(Counter(word))
|
87cfee25ecfe0e8ca05f182504465d175b0a50a6
| 89,472
|
from typing import List
def loadMatrix(file_name: str) -> List[List[float]]:
    """
    Import a matrix from a file.

    File layout: the first line is the row count, the second the column
    count, then one float per line in row-major order. The matrix is
    returned as a row-oriented nested list.
    """
    with open(file_name, 'r') as stream:
        rows = int(stream.readline())
        cols = int(stream.readline())
        matrix: List[List[float]] = []
        for _ in range(rows):
            matrix.append([float(stream.readline()) for _ in range(cols)])
        return matrix
|
860fca72aeb56ce2f30310a0db63c7b212ee2a3b
| 89,474
|
def isNotBlank(s):
    """Return True when *s* is a non-empty string with at least one
    non-whitespace character; False for None, "", or whitespace-only."""
    if not s:
        return False
    return bool(s.strip())
|
363716d587132886e2c0d730f4f8c940dd933fe0
| 89,476
|
import struct
def itoby(v, length):
    """
    Convert an unsigned integer *v* to a little-endian bytearray of the
    given *length* (must be 1, 2, 4, or 8 bytes).
    """
    struct_format = {1: "<B", 2: "<H", 4: "<L", 8: "<Q"}[length]
    packed = struct.pack(struct_format, v)
    return bytearray(packed)
|
baae5ecdb3ff287527c869649341f1879c4d69d1
| 89,477
|
def encode_special_characters(string: str) -> str:
    """Make a string safe for URLs as required by REST APIs.

    Maps '#' -> ';23', '&' -> ';26', '/' -> '|', '*' -> ';2A' in a single
    translation pass (the replacements never produce mapped characters,
    so this matches sequential replacement exactly).
    """
    translation = str.maketrans({"#": ";23", "&": ";26", "/": "|", "*": ";2A"})
    return string.translate(translation)
|
0d016c5fdb6013bbcb8b8e3ff1dbfb3205298590
| 89,478
|
import json
def convert_type(base_list):
    """
    Convert a list of strings into their native Python types.

    Example: ["pyon", "1", "1.5"] -> ["pyon", 1, 1.5]

    :param base_list: List of strings
    :return: Converted list of native types (unparseable items kept as-is)
    """
    def _parse(token):
        # json.loads handles ints, floats, booleans, null, etc.;
        # JSONDecodeError is a ValueError, so plain text falls through.
        try:
            return json.loads(token)
        except ValueError:
            return token
    return [_parse(item) for item in base_list]
|
82fed790d7d7765cc0b9c2bddeb38083f4703ff4
| 89,484
|
def replace_special_characters(text):
    """Replace special characters in a string.

    Returns a lowercased copy of str(text) with special (non-alphanumeric)
    characters replaced by underscores and whitespace replaced by hyphens.
    Useful for generating strings to use in HTML documents.
    """
    def _map_char(ch):
        if ch.isspace():
            return "-"
        if not ch.isalnum():
            return "_"
        return ch
    return "".join(_map_char(c) for c in str(text).lower())
|
3d15d18d9f54d228922c8e60745516090ac7cc25
| 89,487
|
import torch
def pick_valid_points(coord_input, nodata_value, boolean=False):
"""
Pick valid 3d points from provided ground-truth labels.
@param coord_input [B, C, N] or [C, N] tensor for 3D labels such as scene coordinates or depth.
@param nodata_value Scalar to indicate NODATA element of ground truth 3D labels.
@param boolean Return boolean variable or explicit index.
@return val_points [B, N] or [N, ] Boolean tensor or valid points index.
"""
batch_mode = True
if len(coord_input.shape) == 2:
# coord_input shape is [C, N], let's make it compatible
batch_mode = False
coord_input = coord_input.unsqueeze(0) # [B, C, N], with B = 1
val_points = torch.sum(coord_input == nodata_value, dim=1) == 0 # [B, N]
val_points = val_points.to(coord_input.device)
if not batch_mode:
val_points = val_points.squeeze(0) # [N, ]
if boolean:
pass
else:
val_points = torch.nonzero(val_points, as_tuple=True) # a tuple for rows and columns indices
return val_points
|
e6b2763281729f7d40c96f7a68bceb8c4bad3fa2
| 89,489
|
def ByteString2String(Bstring):
    """
    Decode a UTF-8 byte string into a ``str``.

    :param Bstring: bytes-like object containing UTF-8 text.
    :return: the decoded string.
    """
    decoded_text = str(Bstring, 'utf-8')
    return decoded_text
|
80f3949f52c8a35a2b3cfd3dc4fbff22bd2ac043
| 89,493
|
def fakultaet(integer):
    """Compute the factorial n! of a number n.

    Returns 1 for n <= 1 (including the n == 0 base case).
    """
    # Ascending product 2..n yields the same result as the original
    # descending loop; an empty range leaves the product at 1.
    result = 1
    for factor in range(2, integer + 1):
        result *= factor
    return result
|
55b1f95c72360debbb682526df5e1225a3aa990a
| 89,495
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.