| content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def get_angle_from_rotated_rect(rotrect):
"""
Computes the relative angle to the sub needed to align
to the rectangle along its long axis.
"""
# True if taller than wide
if rotrect[1][0] < rotrect[1][1]:
return rotrect[2]
return rotrect[2] + 90
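A minimal usage sketch, assuming rotrect follows the OpenCV cv2.minAreaRect convention of ((center_x, center_y), (width, height), angle):

# Hypothetical rotated rect: wider than tall, rotated 30 degrees
rotrect = ((100.0, 50.0), (80.0, 20.0), 30.0)
print(get_angle_from_rotated_rect(rotrect))  # 120.0 (width > height, so angle + 90)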
|
220f90e0538387c7183801392b3aca813d8fa28b
| 66,167
|
def get_country_name_from_iso_code(data_dict, country_code):
"""
Returns the Passport-standard country name for a given country code (fast)
:param data_dict: Dictionary containing a mapping of ISO-2 country codes to country names
:param country_code: the country code (two characters capitalized)
:return: The country name if found, otherwise the original country code (string)
"""
try:
country_nm = data_dict[str(country_code)]
return str(country_nm)
except KeyError:
return str(country_code)
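A quick usage sketch with a hypothetical two-entry mapping:

iso_to_name = {"DE": "Germany", "FR": "France"}
print(get_country_name_from_iso_code(iso_to_name, "DE"))  # Germany
print(get_country_name_from_iso_code(iso_to_name, "XX"))  # XX (unknown code returned unchanged)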
|
fdaef38431156b5dbe528a2437588a1d549c81ba
| 66,172
|
import torch
def loss_TV(p,q):
"""
Total Variation loss function between two torch Tensors
:param p: 1st tensor
:param q: 2nd tensor
:return: sum of the absolute differences of each component
"""
return torch.sum(torch.abs(p-q))  # note: the sum scales with tensor size; use torch.mean for a size-independent variant
|
edf57ef48dd9139ea0ea78fa5406f62ea1c98cf2
| 66,173
|
import socket
def connect_to_host(host, port):
"""
Init socket; connect to host at specified port
Keyword arguments:
host -- hostname (str)
port -- port number (int)
"""
control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
control_socket.connect((host, port))
return control_socket
|
0e25249f71109c49e317d44fdf12a7aaf62d2d8f
| 66,180
|
def check_keys_exist(d, keys):
""" This function ensures the given keys are present in the dictionary. It
does not otherwise validate the type, value, etc., of the keys or their
values. If a key is not present, a KeyError is raised.
The motivation behind this function is to verify that a config dictionary
read in at the beginning of a program contains all of the required values.
Thus, the program will immediately detect when a required config value is
not present and quit.
Input:
d (dict) : the dictionary
keys (list) : a list of keys to check
Returns:
list: an empty list (if any keys are missing, a KeyError is raised instead)
Raises:
KeyError: if any of the keys are not in the dictionary
"""
missing_keys = [k for k in keys if k not in d]
if len(missing_keys) > 0:
missing_keys = ' '.join(missing_keys)
msg = "The following keys were not found: " + missing_keys
raise KeyError(msg)
return missing_keys
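A short sketch of the intended config-validation pattern, using hypothetical key names:

config = {"input_dir": "/data", "n_threads": 4}
check_keys_exist(config, ["input_dir", "n_threads"])  # returns [] silently
try:
    check_keys_exist(config, ["input_dir", "output_dir"])
except KeyError as err:
    print(err)  # 'The following keys were not found: output_dir'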
|
6943550837883ba8494733913cc63577a63f0245
| 66,183
|
import re
def to_snake_case(camel_case_str):
"""
Returns `camel_case_str` (e.g. `SomeStr`) as snake_case (e.g. `some_str`)
:param camel_case_str: A string in `CamelCase`
:return: The string in `snake_case`
"""
els = [
el.lower()
for el in re.findall("[a-zA-Z][^A-Z_]*", camel_case_str)
if el != "_"
]
return "_".join(els)
|
06b6de266d148e6d8c3e6c39dbc87c5ec9d857d8
| 66,184
|
def cli_cosmosdb_mongodb_collection_throughput_migrate(client,
resource_group_name,
account_name,
database_name,
collection_name,
throughput_type):
"""Migrate an Azure Cosmos DB MongoDB collection throughput"""
if throughput_type == "autoscale":
return client.migrate_mongo_db_collection_to_autoscale(resource_group_name, account_name, database_name, collection_name)
return client.migrate_mongo_db_collection_to_manual_throughput(resource_group_name, account_name, database_name, collection_name)
|
f83f48c033bc73603b32867e512c349460d69003
| 66,187
|
def set_bitfield_bit(bitfield, i):
"""
Set the bit in ``bitfield`` at position ``i`` to ``1``.
"""
byte_index = i // 8
bit_index = i % 8
return (
bitfield[:byte_index] +
bytes([bitfield[byte_index] | (1 << bit_index)]) +
bitfield[byte_index+1:]
)
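A small sketch: setting bit 9 of a two-byte field flips bit 1 of the second byte, since bits are indexed least-significant-first within each byte:

bitfield = b"\x00\x00"
print(set_bitfield_bit(bitfield, 9))  # b'\x00\x02'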
|
2e3425d09df75b655991d17ea4c6f59249ad78a4
| 66,189
|
def GTR(NdotHs, p_roughness, gamma=1.):
"""
Calculate the GTR microfacet distribution coefficient, given the
inner products between the surface normals and the half vectors and the
surface roughness.
Inputs:
NdotHs NxLx3 torch.tensor containing the inner products
p_roughness Nx1 torch.tensor containing the surface roughnesses
Outputs:
Ds NxLx1 torch.tensor containing the microfacet distributions
"""
cosNH2 = (NdotHs ** 2).clamp_(min=0., max=1.)
p_roughness2 = p_roughness ** 2
if gamma == 1.:
cs = (p_roughness2 - 1) / p_roughness2.log()
Ds = cs / (1 + (p_roughness2 - 1) * cosNH2 + (cosNH2 == 1).float())
Ds[cosNH2 == 1.] = (-1 / p_roughness2.log() / p_roughness2).repeat(cosNH2.shape[0],1,1)[cosNH2 == 1.]
else:
cs = (gamma - 1) * (p_roughness2 - 1) / (1 - p_roughness2 ** (1 - gamma))
Ds = cs / ((1 + (p_roughness2 - 1) * cosNH2) ** gamma)
return Ds
|
6c8aedb2036d5c82682c34a3112663148581f483
| 66,191
|
def is_defined(s, table):
"""
Test if a symbol or label is defined.
:param s: The symbol to look up.
:param table: A dictionary containing the labels and symbols.
:return: True if defined, False otherwise.
"""
try:
table[s] # Exploiting possible KeyError
return True
except KeyError:
return False
|
02b96a86d96da97c0b008dcaa7924c9e77a003f2
| 66,200
|
def Bcast_list(self, world, root=0):
"""Broadcast a list by pickling, sending, and unpickling.
This is slower than using numpy arrays and uppercase (Bcast) mpi4py routines.
Must be called collectively.
Parameters
----------
self : list
A list to broadcast.
world : mpi4py.MPI.Comm
MPI parallel communicator.
root : int, optional
The MPI rank to broadcast from. Default is 0.
Returns
-------
out : list
The broadcast list on every MPI rank.
"""
this = world.bcast(self, root=root)
return this
|
ac60ae4254ae99f515172d24773f106090c07f57
| 66,207
|
def add_file_to_dict(dictionary, file):
"""
Populates a dictionary with
key: file name
value: number of instances in the directory
Args:
dictionary: dictionary of file names
file: file name to be added to dictionary
Returns:
The modified dictionary
"""
# If file exists in dictionary
if file in dictionary:
# Add number of instances of the file
dictionary[file] += 1
# If file does not exist
else:
# Add it to our dictionary
dictionary[file] = 1
return dictionary
|
a7cc0955fd3195170c61a6e40524739caaae0cb9
| 66,213
|
import heapq
def place_items_in_square(items, t):
"""
Returns a list of rows that are stored as a priority queue to be
used with heapq functions.
>>> place_items_in_square([1,5,7], 4)
[(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
>>> place_items_in_square([1,5,7], 3)
[(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])]
"""
# A minheap (because that's all that heapq supports :/)
# of the length of each row. Why this is important is because
# we'll be popping the largest rows when figuring out row displacements.
# Each item is a tuple of (t - |row|, y, [(xpos_1, item_1), ...]).
# Until the call to heapq.heapify(), the rows are ordered in
# increasing row number (y).
rows = [(t, y, []) for y in range(t)]
for item in items:
# Calculate the cell the item should fall in.
x = item % t
y = item // t
# Push the item to its corresponding row...
inverse_length, _, row_contents = rows[y]
heapq.heappush(row_contents, (x, item))
# Ensure the heap key is kept intact.
rows[y] = inverse_length - 1, y, row_contents
assert all(inv_len == t - len(row_contents) for inv_len, _, row_contents in rows)
heapq.heapify(rows)
# Return only rows that are populated.
return [row for row in rows if row[2]]
|
ba387625ca3f97d636934a204ebaa92adeded258
| 66,215
|
import base64
def enc_powershell(raw):
"""
Encode a PowerShell command into a form usable by powershell.exe -enc ...
"""
return base64.b64encode(b"".join([bytes([char]) + b"\x00" for char in bytes(raw, 'utf-8')])).decode("utf-8")
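For ASCII-only commands this hand-rolled interleaving matches UTF-16LE encoding, so the result can be cross-checked directly (a sketch, not a guarantee for non-ASCII input):

cmd = "Get-Process"
encoded = enc_powershell(cmd)
assert encoded == base64.b64encode(cmd.encode("utf-16-le")).decode("utf-8")
print(encoded)  # pass to: powershell.exe -enc <encoded>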
|
0f9879f96c4ae4b3091d2923b406d2793b986967
| 66,217
|
def partition(iterable, n):
"""
Partitions an iterable into tuples of size n. Expects the iterable length to be a
multiple of n.
partition('ABCDEF', 3) --> [('A', 'B', 'C'), ('D', 'E', 'F')]
"""
assert len(iterable) % n == 0
args = [iter(iterable)] * n
return zip(*args)
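Usage sketch; in Python 3 zip() is lazy, so wrap the result in list() to materialize it:

print(list(partition("ABCDEF", 3)))  # [('A', 'B', 'C'), ('D', 'E', 'F')]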
|
fd656170589768f6fcb6368d47eafa1a53005722
| 66,224
|
import logging
def get_logger(name=None, default_level=logging.INFO):
"""
Create a Logger object to use instead of relying on print statements.
:param name: str name of logger. If None, then root logger is used.
:param default_level: Log Level
:return logger: A Configured logger object
"""
logger = logging.getLogger(name)
logger.setLevel(default_level)
ch = logging.StreamHandler()
ch.setLevel(default_level)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
|
785033ae46a0ad52b79565f95db13f6755d22505
| 66,226
|
import pickle
def load_object(fpath):
"""
Load a pickled object from file
"""
with open(fpath, 'rb') as i:
return pickle.load(i)
|
6ce2cb1135ccfdacb624a5a9b032b5309844f2bf
| 66,227
|
from typing import Union
def kelvin_to_celsius(k_degrees: Union[int, float]) -> int:
"""Returns kelvins converted to celsius
:param k_degrees: temperature in kelvins
:type k_degrees: int or float
:return: temperature converted to celsius without the fractional part
:rtype: int
"""
MIN_TEMP_KELVIN = 0
K_C_RATIO = 273.15
if not isinstance(k_degrees, (int, float)):
raise TypeError("Incorrect argument type of k_degrees. "
f"Expected {int} or {float}, "
f"got {type(k_degrees)} instead!")
if k_degrees < MIN_TEMP_KELVIN:
raise ValueError("Incorrect value of k_degrees. "
f"k_degrees cannot be lower than {MIN_TEMP_KELVIN}!")
celsius = int(k_degrees - K_C_RATIO)
return celsius
|
346d78325228e9eea8367beefcd25bb219e16c40
| 66,229
|
def extract_time(seconds):
"""
extract_time(seconds) -> String
Turn the time in seconds to a string containing the time formatted into
day, hours, minutes and seconds.
"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if d == 0 and h == 0 and m == 0:
return "%ds" % s
if d == 0 and h == 0:
return "%dm %ds" % (m, s)
elif d == 0:
return "%dh %dm %ds" % (h, m, s)
else:
return "%dd %dh %dm %ds" % (d, h, m, s)
|
377626cd059853b028a528592f42089f907dad55
| 66,230
|
def _get_key_from_day(start, datetime):
"""Return a key encoding the passed date, starting the key with 'start'"""
return "%s/%4d-%02d-%02d" % (start, datetime.year,
datetime.month, datetime.day)
|
9550cbbbe8d710f4c60d2cad844438bfc0dc298a
| 66,233
|
def lerp(x, x0,x1,y0,y1):
"""
This function is a helper function to normalize values.
Mathematically, this function does a linear interpolation for x from
range [x0,x1] to [y0,y1].
see https://en.wikipedia.org/wiki/Linear_interpolation
Args:
x: Value to be interpolated
x0: Lower range of the original range.
x1: Higher range of the original range.
y0: Lower range of the targeted range.
y1: Higher range of the targeted range.
Returns:
float: interpolated value.
"""
# if not(x0 <= x <= x1):
# print x
x, x0,x1,y0,y1 = map(float, (x, x0,x1,y0,y1))
return y0+(x-x0)*(y1-y0)/(x1-x0)
|
978b7570899b1b88a2b365055eaa4da6be83f453
| 66,234
|
import re
def normalize_whitespace(text):
"""Normalizes whitespace in text.
Scanners that parse text generally need whitespace normalized, otherwise
metadata parsed from the text may be unreliable. This function normalizes
whitespace characters to a single space.
Args:
text: Text that needs whitespace normalized.
Returns:
Text with whitespace normalized.
"""
if isinstance(text, bytes):
text = re.sub(br'\s+', b' ', text)
text = re.sub(br'(^\s+|\s+$)', b'', text)
elif isinstance(text, str):
text = re.sub(r'\s+', ' ', text)
text = re.sub(r'(^\s+|\s+$)', '', text)
return text
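A quick check of both the str and bytes code paths:

print(normalize_whitespace("  a\tb\n c  "))   # a b c
print(normalize_whitespace(b"  a\tb\n c  "))  # b'a b c'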
|
e6dbed846975525e49d3a828268b56f6054cd020
| 66,239
|
def nice_language(language):
"""Format a :class:`~babelfish.Language` in a nice string with country name if any.
:param language: the language.
:type language: :class:`~babelfish.Language`
:return: a nice representation of the language.
:rtype: str
"""
if language.country is not None:
return '{name} ({country})'.format(name=language.name, country=language.country.name.capitalize())
return language.name
|
33c5ff5d4dc49903b6302a78dea25d63ccc47e96
| 66,242
|
def count_elements(_polymer):
"""Return a list of the polymer elements with the tuples sorted by count descending"""
count_dicts = { c: _polymer.count(c) for c in _polymer }
return sorted(count_dicts.items(), key = lambda p: p[1], reverse=True)
|
902304a8892b40288e47316b6d3e18d9285e3e96
| 66,246
|
def cls_name(instance):
"""Return the name of the class of the instance.
>>> cls_name({})
'dict'
>>> cls_name(AttributeError('attr'))
'AttributeError'
"""
return instance.__class__.__name__
|
a71884521a2bcea5f9fc5191606f5ec2f45018cb
| 66,252
|
def bound_scalar(x, lower, upper):
"""
Bound x between lower and upper, where x is a scalar value.
"""
return min(upper, max(lower, x))
|
73649b1bda4b1f5a99a01c72e8e02db1972e151a
| 66,254
|
def piano_key_from_midi(midi_note):
"""
Piano key number for a MIDI note number
:param midi_note: MIDI note number
:return: piano key number
"""
return midi_note - 20
|
33953e5995276513d0d1ac069ab55dbe4f235e2d
| 66,255
|
def _calH(s, l, m):
""" Eq. (52d) """
if (0 == l) or (0 == s):
return 0.
return - m*s/l/(l+1)
|
8b4b3b66250a215bce5c1a9ece3e64f4fb1fa6ee
| 66,256
|
def get_aap_exemptions(resource_props):
"""
Gets the list of parameters that the Heat author has exempted from following
the naming conventions associated with AAP.
:param resource_props: dict of properties under the resource ID
:return: list of all parameters to exempt or an empty list
"""
metadata = resource_props.get("metadata") or {}
return metadata.get("aap_exempt") or []
|
ad03f0c5e5892d708c21a48733ac4984c446c97d
| 66,262
|
def JoinURL(base_url, *args):
"""Join two or more parts of a URL.
Args:
base_url: str, base URL like https://example.com/foo.
*args: str arguments to join to the base URL, like 'bar' or '/bar'.
Returns:
String URL joined sanely with single slashes separating URL parts.
"""
url = base_url
for part in args:
if part.startswith('/'):
part = part[1:]
if not url or url.endswith('/'):
url += part
else:
url += '/' + part
return url
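A usage sketch showing how leading slashes on parts are absorbed:

print(JoinURL("https://example.com/foo", "bar", "/baz"))
# https://example.com/foo/bar/baz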
|
72a9a0681d114f238eab5058dd3929fe29c5ba18
| 66,264
|
def trim_alignment(alignment, cutoff):
"""
Trim the alignment based on the number of informative residues
:param alignment: The list of alignment strings
:type alignment: list of str
:param cutoff: The cutoff for informative residues
:type cutoff: float
:return: The revised list of alignments
:rtype: list of str
"""
alignmentlen = len(alignment[0])
naligns = len(alignment)
keepposn = []
for i in range(alignmentlen):
non_ir = 0 # non informative residues (i.e. '-')
for a in alignment:
if a[i] == '-':
non_ir += 1
if (1.0 * (naligns - non_ir) / naligns) >= cutoff:
keepposn.append(i)
newalignment = []
for a in alignment:
newalignment.append(''.join([a[i] for i in keepposn]))
return newalignment
|
2fdf40e8b45d1bc81ab4b729b4421ce9309ca836
| 66,266
|
def setDictDefaults (d, defaults):
"""Sets all defaults for the given dictionary to those contained in a
second defaults dictionary. This convenience method calls:
d.setdefault(key, value)
for each key and value in the given defaults dictionary.
"""
for key, val in defaults.items():
d.setdefault(key, val)
return d
|
4beed35cefa7d25a83e031356c0d07e48f9616de
| 66,267
|
from typing import Tuple
def train_val_size(dataset, val_ratio: float = 0.1) -> Tuple[int, int]:
"""
Return the train and validation data sizes based on split ratio.
Args:
dataset: A python collection
val_ratio: Ratio for validation dataset
Returns:
Tuple of number of rows for (training, validation)
"""
val_size = int(val_ratio * len(dataset))
train_size = len(dataset) - val_size
return train_size, val_size
|
223697f6809d3be0b45db74c8a87a90268194fa4
| 66,268
|
def unquote(value: str) -> str:
"""Removes the prefix and suffix if they are identical quotes"""
if len(value) > 1 and value[0] in {'"', "'"} and value[0] == value[-1]:
return value[1:-1]
return value
|
c43f91b0ea69c0e4f058f8dfcdb2fb6f45b6b57a
| 66,271
|
def lower_words(iterable):
"""
turn all words in `iterable` to lower
"""
return [w.lower() for w in iterable]
|
f8edeb1124fe541ddb815f48e7bc704d13438747
| 66,276
|
def invite_user(slack_client, user, channel):
"""
Invite a user to a given channel.
"""
response = slack_client.api_call("channels.invite",
channel=channel,
user=user)
return response
|
6519e85299ef817aca5a9a734599e38d7b6a7905
| 66,281
|
import re
def kebab_case(inp):
""" Convert from `CamelCase` to `kebab-case`. """
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', inp)
return re.sub('([a-z0-9])([A-Z])', r'\1-\2', s1).lower()
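A sample call, showing that acronym boundaries are preserved:

print(kebab_case("HTMLParserError"))  # html-parser-error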
|
6e8d38188c213fe2efc2b8bfa43dd81b697cbcd0
| 66,282
|
def get_list_from_container(param, prop: str, t):
"""
Takes proto parameter and extracts a value it stores.
Args:
param: proto parameter
prop: name of the property to take
t: type of the value (int, float etc.) - only primitive ones
Returns:
If it is a container, returns the list with values.
If it is a single value of the given type - a list of single value.
If neither or property does not exist for param - empty list.
"""
if not param or not hasattr(param, prop):
return []
prop_val = getattr(param, prop)
if not prop_val:
return []
elif isinstance(prop_val, t):
return [prop_val]
elif len(prop_val) > 0:
return prop_val
return []
|
08f45c022d32f27265f8aa53d0fb61face1cb370
| 66,285
|
def variable_list(expreStr):
"""get the set of variable of expression.
Args:
expreStr: the mba expression string.
Return:
variableList: the list of variables.
"""
varSet = set(expreStr)
variableList = []
for i in varSet:
#the variable name
if i in ["x", "y", "z", "t", "a", "b", "c", "d", "e", "f"]:
variableList.append(i)
return variableList
|
0c8b03ca45361942a436329fe6686b05dfe77166
| 66,291
|
def regexHasSubstringWitness(regex):
"""Returns True if regex has a Subtring Witness among its witnesses
Substring witness: An input where
- two languages agree on a match, but
- disagree on the matching substring
"""
# Check if any of the SDWs disagreed about the contents of a match
for sdw in regex.semanticDifferenceWitnesses:
# Did any two sets of languages find different substrings to match?
uniqueMatchingSubstrings = set([
mr.matchContents.matchedString
for mr in sdw.matchResultToLangs
if mr.matched
])
if len(uniqueMatchingSubstrings) > 1:
#libLF.log("Substring witness: {}".format(sdw.toNDJSON()))
#libLF.log(" Unique matching substrings: {}".format(uniqueMatchingSubstrings))
return True
return False
|
4c2346d3ddd7b262d6879d386b04ccd54aaf3ced
| 66,295
|
def fmt_pkg(repouser, srcrepo, package):
"""Pretty print package line"""
return ('{user}/{repo}/{filename:<65} {distro:<20} {timestamp:>16}'.format(
user=repouser, repo=srcrepo, filename=package['filename'],
distro=package['distro_version'], timestamp=package['created_at']))
|
e01c18e0c5c5edbd82a63fd13f43c8945b9bd19e
| 66,296
|
def get_api_data_center(api_key):
"""Determine the Mailchimp API Data Center for `api_key`
http://developer.mailchimp.com/documentation/mailchimp/guides/get-started-with-mailchimp-api-3/
"""
data_center = api_key.split('-')[1]
return data_center
|
275e6de6838f03887653ac22fa8a26bf65dd119b
| 66,299
|
from typing import Any
import json
def loads(data: str, **kwargs: Any) -> Any:
"""
Alias for `json.loads`.
Arguments:
data -- A string with valid JSON.
kwargs -- List of additional parameters to pass to `json.loads`.
Returns:
An object created from JSON data.
"""
return json.loads(data, **kwargs)
|
ab1c081c630cf339d3a2674258d77b12bc2578f7
| 66,301
|
def getIsotopeFraction(concentrationDict,labeledDict,unlabeledDict, reverse_labels=False):
"""
Calculates and returns the fraction of atoms which are labeled in the set of
isotopologues.
If reverse_labels is given, it returns the fraction of unlabeled atoms instead.
"""
numerator = 0
denominator = 0
for spec, conc in concentrationDict.items():
numerator += conc * labeledDict[spec]
denominator += conc * (unlabeledDict[spec] + labeledDict[spec])
if reverse_labels:
return 1. - numerator / denominator
return numerator / denominator
|
824e2e6503255f51b198b1851d81796fa68d1f5f
| 66,302
|
import ast
def ast_node(expr: str) -> ast.AST:
"""Helper function to parse a string denoting an expression into an AST node"""
# ast.parse returns "Module(body=[Node])"
return ast.parse(expr).body[0]
|
268820f1ca785fa2d9a1f09b1190a1346b5c91dd
| 66,304
|
def find_next_empty(puzzle):
"""
Finds the next empty row or column that is not yet filled
:param puzzle:
:return:tuple(row,col) or (None,None) if no more space
"""
for r in range(9):
for c in range(9):
if puzzle[r][c] == -1:
return r, c # if rth row and cth col is empty then return that row and column
return None, None
|
b591d464574ac09741a01a48e0eb363b3d50ad56
| 66,319
|
def generating_polynomials_table() -> dict[int, tuple[int]]:
"""A table containing the coefficients of the generating polynomials
(Reed-Solomon algorithm).
Returns:
dict[int, tuple[int]]: Dictionary containing {number of correction
bytes: polynomial coefficients}
"""
table = {
7: (87, 229, 146, 149, 238, 102, 21),
10: (251, 67, 46, 61, 118, 70, 64, 94, 32, 45),
13: (74, 152, 176, 100, 86, 100, 106, 104, 130, 218, 206, 140, 78),
15: (8, 183, 61, 91, 202, 37, 51, 58, 58, 237, 140, 124, 5, 99, 105),
16: (120, 104, 107, 109, 102, 161, 76, 3, 91, 191, 147, 169, 182, 194,
225, 120),
17: (43, 139, 206, 78, 43, 239, 123, 206, 214, 147, 24, 99, 150, 39,
243, 163, 136),
18: (215, 234, 158, 94, 184, 97, 118, 170, 79, 187, 152, 148, 252, 179,
5, 98, 96, 153),
20: (17, 60, 79, 50, 61, 163, 26, 187, 202, 180, 221, 225, 83, 239,
156, 164, 212, 212, 188, 190),
22: (210, 171, 247, 242, 93, 230, 14, 109, 221, 53, 200, 74, 8, 172,
98, 80, 219, 134, 160, 105, 165, 231),
24: (229, 121, 135, 48, 211, 117, 251, 126, 159, 180, 169, 152, 192,
226, 228, 218, 111, 0, 117, 232, 87, 96, 227, 21),
26: (173, 125, 158, 2, 103, 182, 118, 17, 145, 201, 111, 28, 165, 53,
161, 21, 245, 142, 13, 102, 48, 227, 153, 145, 218, 70),
28: (168, 223, 200, 104, 224, 234, 108, 180, 110, 190, 195,
147, 205, 27, 232, 201, 21, 43, 245, 87, 42, 195, 212,
119, 242, 37, 9, 123),
30: (41, 173, 145, 152, 216, 31, 179, 182, 50, 48, 110, 86,
239, 96, 222, 125, 42, 173, 226, 193, 224, 130, 156, 37,
251, 216, 238, 40, 192, 180)
}
return table
|
b94e5a8160ab3773296bc301527da850096cd811
| 66,321
|
def _Top(x, **unused_kwargs):
"""Top element from the stack."""
if isinstance(x, (list, tuple)):
return x[0]
return x
|
8e3ecc0a15f34b65374101f7c9aed22df16d838a
| 66,324
|
def htonl(x):
"""Convert 32-bit positive integers from host to network byte order."""
return (
((x) << 24 & 0xFF000000)
| ((x) << 8 & 0x00FF0000)
| ((x) >> 8 & 0x0000FF00)
| ((x) >> 24 & 0x000000FF)
)
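A sanity check; byte-swapping twice restores the original value:

print(hex(htonl(0x12345678)))         # 0x78563412
print(hex(htonl(htonl(0x12345678))))  # 0x12345678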
|
21b69071b995ad94096760363c647d1fbc474c57
| 66,329
|
import torch
def revert_tensor_normalize(tensor, mean, std):
"""
Revert normalization for a given tensor by multiplying by the std and adding the mean.
Args:
tensor (tensor): tensor to revert normalization.
mean (tensor or list): mean value to add.
std (tensor or list): std to multiply.
"""
if type(mean) == list:
mean = torch.tensor(mean)
if type(std) == list:
std = torch.tensor(std)
tensor = tensor * std
tensor = tensor + mean
return tensor
|
6c3432ff2c3b668d34baa19cee986d787e6956dc
| 66,333
|
def read(filename):
"""
Reads the contents of a file and returns it as a string.
:param: (str) filename: the name of the file to read
:return: (str): the full contents of the file
"""
with open(filename) as file:
return file.read().strip()
|
cd2f887aff1cf6f4aa89e1b5c2bdc53827f41656
| 66,342
|
import math
def linear_slope(start_pos, end_pos):
"""Linear slope of line.
:param start_pos Tuple[int,int]:
:param end_pos Tuple[int,int]:
:returns number or nan: nan if vertical slope
"""
try:
return \
(end_pos[1] - start_pos[1]) \
/ (end_pos[0] - start_pos[0])
except ZeroDivisionError:
return math.nan
|
010a70511531698aed914f3ad789498a46fccb79
| 66,346
|
def license_to_dict(lico):
"""
Return a dict of license data with texts usable for API calls given a ``lico``
ScanCode License object. Fields with empty values are not included.
"""
licm = dict(
key=lico.key,
category=lico.category,
short_name=lico.short_name,
name=lico.name,
owner=lico.owner,
is_exception=lico.is_exception,
language=lico.language or "en",
full_text=lico.text,
spdx_license_key=lico.spdx_license_key,
reference_notes=lico.notes,
homepage_url=lico.homepage_url,
text_urls="\n".join(lico.text_urls or []),
osi_url=lico.osi_url,
faq_url=lico.faq_url,
other_urls="\n".join(lico.other_urls or []),
)
return {k: v for k, v in licm.items() if v}
|
cde9cbcf590c8d560b7761fe5c48851894d909bf
| 66,352
|
from typing import Tuple
from typing import Dict
def process_enum_params(enum_params) -> Tuple[Dict, Dict]:
"""Condense enumerated params into two categories:
* Params with the same values across all endpoints
* Params with different values for some endpoints
"""
constant_enum_params = {name: options for (path, name, options) in enum_params}
def has_multiple_enums(name, options):
return any([n == name and o != options for (p, n, o) in enum_params])
def get_all_enums(name):
"""Get all enums with the given parameter name, along with their parent endpoint paths"""
return {p: o for (p, n, o) in enum_params if n == name}
# Find any enumerated params with same name but different values per endpoint
variable_enum_params = {
name: get_all_enums(name)
for name, options in constant_enum_params.items()
if has_multiple_enums(name, options)
}
for name in variable_enum_params:
constant_enum_params.pop(name)
return constant_enum_params, variable_enum_params
|
dbb98af4d79c3695a68cb159580daeb65f7ceb48
| 66,354
|
def get_user_info(data, channel=False):
"""
Get calling user's information in human readable format from request data
:param dict data:
:param bool channel:
:return str:
"""
if channel:
return "{} in {}".format(
data.get("user_name"),
data.get("channel_name")
)
return data.get("user_name")
|
658e4c2c2d8ed0a88f8b2d0398946097f9a8a682
| 66,355
|
import logging
def get_logger(name):
"""
Create new __logger__ with the given name
:param name: Name of the __logger__
:return: Logger
"""
logger = logging.getLogger(name)
return logger
|
d1a618a34a9fd1ceada925f32e8e58166ae2a605
| 66,357
|
def interpolate(a, x, y):
"""Interpolate between x and y. When a is 0 the value is x, when a is 1 the value is y."""
return (1 - a) * x + a * y
|
3d8a8e2496fcc9861a1f7aee81d08d626c84f304
| 66,358
|
def get_cp_cmd(script,
config_path,
data_path,
dataset_type):
"""
Get the string for a ChemProp command.
Args:
script (str): the path to the chemprop script you're running
config_path (str): path to the config file for the job
data_path (str): path to the dataset being used
dataset_type (str): type of problem you're doing (e.g. regression,
classification, multiclass)
Returns:
cmd (str): the chemprop command
"""
cmd = (f"python {script} --config_path {config_path} "
f" --data_path {data_path} "
f" --dataset_type {dataset_type}")
return cmd
|
527fbd5668b81fe6f0995c42e1dd9fea04c04a1e
| 66,359
|
def hex2rgb(hx):
"""
transform 6-digit hex number into [r,g,b] integers
:param hx:
:return:
"""
assert len(hx) == 6
rgb = []
for r in range(3):
ss = "0x" + hx[2 * r: 2 * r + 2]
rr = int(ss, 16)
rgb.append(rr)
return rgb
|
4c9e9aca428fbe04747b4b76119958efa43b7479
| 66,362
|
def hello(friend_name):
"""
Writes Hello to a friend.
:param friend_name: Our friend's name
:return: Return a message saying hello to our friend
"""
return "Hello, {0}!".format(friend_name)
|
ba68f63b27b3d97041042dd0d374119da582bd3b
| 66,364
|
def find_objective_function(model):
"""
Return reactions that are part of the objective function.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
return [rxn for rxn in model.reactions if rxn.objective_coefficient != 0]
|
6d9769cf7935f29036ccf12dfdccfbdb5f7f54f0
| 66,367
|
def parameter_code_sizer(opcode, raw_parameter_code_list):
"""Ensures parameter code list is the correct length, according to the particular opcode."""
parameter_lengths = {1:3, 2:3, 3: 1, 4: 1, 5: 2, 6:2, 7: 3, 8:3, 9:1, 99:0}
while len(raw_parameter_code_list) < parameter_lengths[opcode]:
raw_parameter_code_list.append(0)
return raw_parameter_code_list
|
bcd37db0f9c0c5299d97be81ecbd8da58355804d
| 66,368
|
def determine_config_values(config, hmm):
"""
Returns group of the HMM protein.
:param config: column patterns
:param hmm: HMM
:return: tuple of hmm and key
"""
for group in config:
for key in group:
if hmm in group[key]:
return (hmm, key)
return (hmm, "N/A")
|
2677ccaac4fe168b28e18bfcd2bf7e1fc74eb182
| 66,378
|
def lunToFreeBSDDiskName(lun,partnum):
"""
Convert lun to '/dev/da' + str(lun) + 'p' + partnum
"""
return '/dev/da'+ str(int(lun)) + 'p' + str(partnum)
|
ecbaaf56bf9e17aebae8f2039946d747e307c116
| 66,379
|
import unicodedata
def remove_accents(input_str):
"""
taken from https://stackoverflow.com/a/517974/3375944
:param input_str: string with accented letters
:return: string without accented letters
"""
nfkd_form = unicodedata.normalize('NFKD', input_str)
return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
|
585ec4e8e03c93c8be7cbedb0922399ed6cac9b5
| 66,380
|
def _extract_categories(annotations):
"""Extract categories from annotations."""
categories = {}
for anno in annotations:
category_id = int(anno['category_id'])
categories[category_id] = {'id': category_id}
return list(categories.values())
|
214b7e8e8a9d3275949270a2bcb2dc6075e76357
| 66,382
|
def FindConfiguration(configuration, name):
""" Finds a configuration value using it's name.
Returns the first configuration with a matching name. Returns None if no
matching configuration is found. """
return_value = None
for row in configuration:
if row['name'] == name:
return_value = row['value']
break
return return_value
|
ff4ca8b384160b5b5500f9a4b8a185e798968e6d
| 66,383
|
import csv
def read(filename):
"""reads the dat file outputs the list row by row."""
output=[]
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
output.append(row)
return (output)
|
30d1c0c83b69ef0daf235a183ec541ab473c5688
| 66,384
|
def letter_grade_to_field_name(letter_grade):
"""Take a letter grade (e.g. A+) and convert it to field name (e.g. a3)."""
if letter_grade in ['F', 'P', 'NP']:
return letter_grade.lower()
# Maps A+ to a3, A to a2, A- to a1, and so on
if len(letter_grade) == 2:
return letter_grade.replace('+', '3').replace('-', '1').lower()
return (letter_grade + '2').lower()
|
ae62c2f086f8af5eabe0c048a4b0a16e245dab58
| 66,387
|
def parse_header(line):
"""Parse a header line.
Args:
line: A header line as a string.
Returns:
None if end of headers is found. A string giving the continuation line
if a continuation is found. A tuple of name, value when a header line is
found.
Raises:
ValueError: If the line cannot be parsed as a header.
"""
if not line or line == "\r\n":
return None
if line[0] in " \t":
return line[1:].rstrip()
name, value = line.split(":", 1)
return (name.strip(), value.strip())
|
1a0b6495c4cd15507146842837970157f49081dd
| 66,388
|
from typing import Dict
def remove_special(text: str, REPLACE: Dict[str, str]) -> str:
"""Replaces special characters with conventional ones.
Parameters:
text: str - text where to replace the special characters
REPLACE: Dict[str, str] - dictionary of mappings of special characters to their
substitutions
Returns:
text: str - text with the special characters replaced
"""
for char, subs in REPLACE.items():
text = text.replace(char.lower(), subs)
return text
|
82a77393bb738f6b743a0207f462c540d15e9503
| 66,389
|
import importlib
def load_selected(text):
"""
Load selected module or class in text
text syntax:
module
package.module
module::class
Returns:
A list of loaded objects
"""
result = []
for line in text.splitlines():
if not line:
continue
if "::" in line:
module, classname = line.rsplit("::", maxsplit=1)
module = importlib.import_module(module)
result.append(getattr(module, classname))
else:
result.append(importlib.import_module(line))
return result
|
fbbd83e127c299d6083b634d6f25dd2aa563f95f
| 66,391
|
def results_shrink(results, startYear=2010, endYear=2019):
"""
Select a subsection of all results.
Parameters
----------
results : pandas.DataFrame
Results table with a Season column.
startYear : int, optional
Season start of selection. The default is 2010.
endYear : int, optional
Season start of end of selection. The default is 2019.
Returns
-------
resultsShrink : pandas.DataFrame
Subsection of results.
"""
resultsShrink = results[(results.Season >= int(startYear)) &
(results.Season <= int(endYear))]
print('Results limited to ', str(startYear), ' through ', str(endYear))
print('Results shape: ', resultsShrink.shape)
return resultsShrink
|
be657469495697b5ae04ebd8478c5e1e54269a78
| 66,392
|
def _parsedrev(symbol):
"""str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
if symbol.startswith(b'D') and symbol[1:].isdigit():
return int(symbol[1:])
if symbol.isdigit():
return int(symbol)
|
c3db8cee3b6a4fdb1c02efc2b931989ac6a1b19b
| 66,393
|
def _parse_cgroup_ids(cgroup_info):
"""Returns a dictionary of subsystems to their cgroup.
Arguments:
cgroup_info: An iterable where each item is a line of a cgroup file.
"""
cgroup_ids = {}
for line in cgroup_info:
parts = line.split(':')
if len(parts) != 3:
continue
_, subsystems, cgroup_id = parts
subsystems = subsystems.split(',')
for subsystem in subsystems:
cgroup_ids[subsystem] = cgroup_id
return cgroup_ids
|
26cb6ced255c8e12e579a7eee6ce89c3255a0159
| 66,395
|
import shutil
import textwrap
def wrap(text: str, indent: str = "") -> str:
"""
Indent and line-wrap a string to fit in the current terminal width.
"""
width = shutil.get_terminal_size((80, 20)).columns
return textwrap.indent(textwrap.fill(text, width - len(indent)), indent)
|
3072f6c6f03747b6466f16e45c8f9ac81dd04ad6
| 66,396
|
def get_aspect_ratio(img):
""" return the aspect ratio of given image
ar = width//height
return an int, we don't care about exact ratios
"""
width, height = img.size
aspect_ratio = width//height
if aspect_ratio == 0:
aspect_ratio = 1
return aspect_ratio
|
9fecf60863f8d72b05e67afd10e6a374bf4ba3e1
| 66,405
|
import hashlib
def md5_hexdigest(filename, chunk_size=1024):
"""Return the MD5 hex-digest of the given file."""
hash_md5 = hashlib.md5()
with open(filename, "rb") as f:
while True:
chunk = f.read(chunk_size)
if not chunk:
# EOF
break
hash_md5.update(chunk)
return hash_md5.hexdigest()
|
1f69eb4abb92c39c618bf3781bf9cd67f7442f33
| 66,406
|
def to_base_n(number: int, base: int):
""" Convert any integer number into a base-n string representation of that number.
E.g. to_base_n(38, 5) = 123
Args:
number (int): The number to convert
base (int): The base to apply
Returns:
[str]: The string representation of the number
"""
ret_str = ""
while number:
ret_str = str(number % base) + ret_str
number //= base
return ret_str
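Example calls (digits are emitted as decimal values, so this is only meaningful for base <= 10):

print(to_base_n(38, 5))  # 123
print(to_base_n(5, 2))   # 101
print(to_base_n(0, 2))   # 0 (with the zero-case guard above)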
|
a01fb3ba4f20edd4b048d31e112bc581ea9c05ac
| 66,409
|
def getPointOnLine(x1, y1, x2, y2, n):
"""Returns the (x, y) tuple of the point that has progressed a proportion
n along the line defined by the two x, y coordinates.
Copied from pytweening module.
"""
x = ((x2 - x1) * n) + x1
y = ((y2 - y1) * n) + y1
return (x, y)
|
7fd7aa8c27ed2fc78e7a61ba8f180f5c124cff29
| 66,411
|
def average(numlist):
""" Return average of a list of numbers """
mean = 0
for i in numlist:
mean += float(i) / len(numlist)
return round(mean, 2)
|
a41e429bdd3e5dbce5638e4d75fd471ad2157345
| 66,412
|
def station_inventory_endpoint(api_key: str) -> str:
"""URL for station inventory given API key.
Referring to the `Accesso General`_ page, this corresponds to the dataset
'Inventario de estaciones de Valores Climatológicos' under 'Valores
Climatológicos'.
.. _`Accesso General`: https://opendata.aemet.es/centrodedescargas/productosAEMET
"""
return (
'https://opendata.aemet.es/opendata/api/valores/climatologicos/'
f'inventarioestaciones/todasestaciones/?api_key={api_key}'
)
|
0874ae574142f26ae132b1a5610e0084cabccee3
| 66,413
|
def locate_zephyr_base(checkout, version):
"""Locate the path to the Zephyr RTOS in a ChromiumOS checkout.
Args:
checkout: The path to the ChromiumOS checkout.
version: The requested zephyr version, as a tuple of integers.
Returns:
The path to the Zephyr source.
"""
return (checkout / 'src' / 'third_party' / 'zephyr' / 'main' /
'v{}.{}'.format(*version[:2]))
|
9e2bba1debcd60240701360348b4a894b7c3915a
| 66,415
|
def reorder_paths(paths, order_strs):
"""reorder a list of paths, using a list of strings.
Returns a new list of the paths, re-ordered so that the
first path will have the first string in it, the second path
will have the second string in it, and so on.
Parameters
----------
paths : list
of paths
order_strs : list
of strings, e.g. visual search stimulus names
Returns
-------
paths_out : list
paths, sorted by order_strs
Notes
-----
Used to sort paths to data and results, according to
visual search stimulus names
"""
if len(paths) != len(order_strs):
raise ValueError(
"length of paths does not equal length of order_strs"
)
paths_out = []
for order_str in order_strs:
for path in paths:
if order_str in path:
paths_out.append(path)
assert len(paths_out) == len(paths), "not all paths in paths_out"
return paths_out
|
862f7d19f7929763874c5d2c3748870ac6423a92
| 66,424
|
import math
def average_headings_degrees(h_list):
""" Averages a list of headings """
x = sum([math.sin(math.radians(h)) for h in h_list])
y = sum([math.cos(math.radians(h)) for h in h_list])
return math.degrees(math.atan2(x, y)) % 360
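Circular averaging handles wrap-around across north correctly, e.g.:

print(round(average_headings_degrees([90, 180]), 1))        # 135.0
print(round(average_headings_degrees([350, 10]), 1) % 360)  # 0.0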
|
82814ec6fa6c65c2b08db8245ede076bd9f917b0
| 66,427
|
from typing import Dict
from typing import List
def _get_unflat_to_flat_param_ids(
flat_to_unflat_param_ids: Dict[int, List[int]],
) -> List[int]:
"""
Inverts the mapping ``flat_to_unflat_param_ids`` to be from unflattened
parameter ID to flattened parameter ID, where the unflattened parameter ID
is the index in the returned :class:`list`. There may be multiple
unflattened parameter IDs mapping to the same flattened parameter ID.
Args:
flat_to_unflat_param_ids (Dict[int, List[int]]): A mapping from
flattened parameter ID to a :class:`list` of corresponding
unflattened parameter IDs.
Returns:
List[int]: A mapping from unflattened parameter ID to flattened
parameter ID, where the unflattened parameter ID is the index in the
:class:`list`.
"""
# Construct as a dict and then convert to list
unflat_to_flat_param_ids = {}
for flat_param_id, unflat_param_ids in flat_to_unflat_param_ids.items():
for unflat_param_id in unflat_param_ids:
assert unflat_param_id not in unflat_to_flat_param_ids, \
"`flat_to_unflat_param_ids` has the unflattened parameter " \
f"ID {unflat_param_id} mapped to multiple flattened " \
"parameter IDs"
unflat_to_flat_param_ids[unflat_param_id] = flat_param_id
num_unflat_param_ids = len(unflat_to_flat_param_ids)
unflat_param_ids_set = set(unflat_to_flat_param_ids.keys())
assert unflat_param_ids_set == set(range(num_unflat_param_ids)), \
"The set of unflattened parameter IDs should be {0, ..., " + \
str(num_unflat_param_ids - 1) + "} but got " + \
f"{unflat_param_ids_set}"
return [
unflat_to_flat_param_ids[unflat_param_id]
for unflat_param_id in range(num_unflat_param_ids)
]
|
4e8d36d14be691fc1bf10cb0919fe752b3643eca
| 66,428
|
def local_part(id_):
"""nmdc:fk0123 -> fk0123"""
return id_.split(":", maxsplit=1)[1]
|
2a3ee4b7bb3b3f8cc953682e0af6270e5b666c9b
| 66,431
|
def getRandomSample(data, minimum=-1, maximum=361):
""" Return a random sample within a data set.
data is a DataFrame object from the pandas module.
minimum and maximum arguments are optional angles (degree) limits in which
the random sample needs to be, by default there is no limits.
"""
# Get a random sample
sample = data.sample(n=1).iloc[0] # take a random sample
# Take another one if it is not in the limits
while sample.angle < minimum or sample.angle > maximum:
sample = data.sample(n=1).iloc[0]
return sample
|
be6beda9cf798853542d139700fc8f39e8d1734f
| 66,432
|
def get_plain_file(file):
"""
Load text file.
Returns:
the stringfied file
"""
with open(file) as f:
body = f.read()
return body
|
1bca489895cb96a2a3dbaf69d773ccf02a88ac5b
| 66,438
|
def group_by_data(data, columns):
"""
This is a wrapper function which wraps around the pandas group by function.
:param data: Pandas data frame
:param columns: The columns which are to be used to be group the data frame on
:return: Pandas data frame
"""
return data.groupby(columns).size().to_frame('count').reset_index()
|
3c3345231233e083a43be78822da755f3bcb013c
| 66,442
|
def is_station_or_line(fid, shape, props):
"""
Returns true if the given (line or polygon from way) feature is a station
or transit line.
"""
railway = props.get('railway')
return railway in ('subway', 'light_rail', 'tram', 'rail')
|
dc3deab46a917a9624f2fc6edd7163fef4e02456
| 66,443
|
def generate_colnames(df):
"""Generates column names for an input dataframe.
Arguments:
df {dataframe} -- [dataframe for which column names are generated]
Returns:
[list] -- [list of generated column names]
"""
colnames = ["CHR", "START", "END"]
for i in range(df.columns.str.contains("usercol").sum()-3):
colnames.append("label_{}".format(i+1))
colnames.append("gc")
colnames.append("num_N")
return colnames
|
1c03792c2e13af4d3b292055ba6ac624dc26101b
| 66,444
|
def hextable(raw: bytes, cols: int = 8) -> str:
"""
Formats raw (binary) message in tabular hexadecimal format e.g.
000: 2447 4e47 5341 2c41 2c33 2c33 342c 3233 | b'$GNGSA,A,3,34,23' |
:param bytes raw: raw (binary) data
:param int cols: number of columns in hex table (8)
:return: table of hex data
:rtype: str
"""
hextbl = ""
colw = cols * 4
rawh = raw.hex()
for i in range(0, len(rawh), colw):
rawl = rawh[i : i + colw].ljust(colw, " ")
hextbl += f"{int(i/2):03}: "
for col in range(0, colw, 4):
hextbl += f"{rawl[col : col + 4]} "
hextbl += f" | {bytes.fromhex(rawl)} |\n"
return hextbl
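A quick sketch using the NMEA fragment from the docstring, at 4 columns:

msg = b"$GNGSA,A,3,34,23"
print(hextable(msg, 4))
# 000: 2447 4e47 5341 2c41  | b'$GNGSA,A' |
# 008: 2c33 2c33 342c 3233  | b',3,34,23' |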
|
85503692f26b1354b1ac630cb0c36bbcf5d254c7
| 66,447
|
from typing import Dict
import re
def generate_tokens(text_file: str) -> Dict[str, int]:
"""Generate tokens from the given text file.
Args:
text_file:
A file that contains text lines to generate tokens.
Returns:
Return a dict whose keys are tokens and values are token ids ranged
from 0 to len(keys) - 1.
"""
tokens: Dict[str, int] = dict()
tokens["<blk>"] = 0
tokens["<sos/eos>"] = 1
tokens["<unk>"] = 2
whitespace = re.compile(r"([ \t\r\n]+)")
with open(text_file, "r", encoding="utf-8") as f:
for line in f:
line = re.sub(whitespace, "", line)
chars = list(line)
for char in chars:
if char not in tokens:
tokens[char] = len(tokens)
return tokens
|
260e397a167d7f6a73837ae3aa8543f1d521cd0d
| 66,448
|
def is_anti_symmetric(L):
"""
Returns True if the input matrix is anti-symmetric, False otherwise.
"""
result = len(L) == len(L[0])
for i in range(len(L)):
for j in range(len(L)):
result = result and (L[i][j] == -L[j][i])
return result
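For example:

A = [[0, 2], [-2, 0]]
B = [[0, 2], [2, 0]]
print(is_anti_symmetric(A))  # True
print(is_anti_symmetric(B))  # False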
|
504ff54f1489bf957f190be6a5357af1f60e95bd
| 66,465
|
def roi_square(roi):
"""
Contract the larger dimension of the rectangle make the ROI dimensions square
"""
x, y, w, h = roi
if w > h:
x += int((w-h)/2.0)
w = h
elif h > w:
y += int((h-w)/2.0)
h = w
return x, y, w, h
|
ba5897350dad82b66adfdf04fcbece95a7a7b1a0
| 66,471
|
def inclusion_check(n_timepoints, mean_fd, max_fd, n_spikes, fd_th):
"""
Check whether a participant should be excluded from analysis
based on motion parameters and spike regressors.
Inputs
-------
n_timepoints: number of timepoints
mean_fd: mean framewise_displacement (FD)
max_fd: maximum FD
n_spikes: number of spikes
fd_th: threshold for mean FD
Outputs
-------
returns 0 if subject should be excluded due to head motion
or 1 if there is no reason to exclude subject based on submitted threshold.
"""
if mean_fd > fd_th:
return 0
elif max_fd > 5:
return 0
elif n_spikes/n_timepoints > 0.20:
return 0
else:
return 1
|
27038c7a741c5c083cfd2b675bdbcaa760002edb
| 66,473
|
from datetime import datetime
def str_to_date(date_string, spec="%Y%m%d"):
"""
Constructs a datetime.date object from string
Args:
date_string(str): Date in string (ex: 20130718)
spec(str): a datetime spec
Returns:
date(date)
Raises:
TypeError: date_string or spec is not a str
ValueError: date_string is not a valid date
"""
return datetime.strptime(date_string, spec).date()
|
37146e3f8715629535e01fbf6d51cfaa3e55e194
| 66,474
|
def ec_url(main_object):
"""Return URL entity in Demisto format for use in entry context
Parameters
----------
main_object : dict
The main object from a report's contents.
Returns
-------
dict
URL object populated by report contents.
"""
url = main_object.get('url')
url_ec = {
'URL': {
'Data': url
}
}
return url_ec
|
c5bdce827e1bfce83ac91e2d717618ea9fa579ad
| 66,476
|
def general_spatial_relation(sp_el1, sp_el2, f):
"""General function for computing spatial relations with a function f
given.
Parameters
----------
sp_el1: optional
the spatial information of element 1.
sp_el2: optional
the spatial information of element 2.
f: function
function to compute spatial relation between spatial objects.
Returns
-------
rel: float
number of the the relation between the spatial object.
"""
rel = f(sp_el1, sp_el2)
return rel
|
92e4dc07778212b43996df23b41a36fa3047ce4f
| 66,477
|