content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def _get_khoros_login_url(khoros_object):
"""This function returns the URL for the Khoros login endpoint.
.. versionadded:: 4.2.0
:param khoros_object: The core Khoros object
:type khoros_object: class[khoros.Khoros]
:returns: The URL string
"""
community_url = khoros_object.core_settings.get('community_url')
return f'{community_url}/restapi/vc/authentication/sessions/login'
|
0a81bc2d974fb8eab5a6799673c58a6df70a4fb1
| 56,705
|
import yaml
def _load_yaml(path: str):
    """Parse the YAML file at *path* with the safe loader and return the data."""
    with open(path) as stream:
        return yaml.safe_load(stream)
|
0dd2db158727b27e54e4cce185c044ef0e725489
| 56,706
|
def host_info_format(host_info: dict):
    """Format host info for display.

    When the host name equals the IP only the IP is shown; otherwise the
    name is appended in parentheses after the IP.

    :param host_info: dict with "name" and "ip" keys
    :return: formatted string
    """
    ip, name = host_info["ip"], host_info["name"]
    if name == ip:
        return ip
    return "%s(%s)" % (ip, name)
|
9845f0eaa2869f4a08d434ab3134605bb109ad71
| 56,710
|
from pathlib import Path
def get_audio_path(audio_dir, song_id):
    """Build the path "<audio_dir>/<song_id>.mp3".

    Args:
        audio_dir (str): Directory where the audio is stored.
        song_id: Song ID (converted to str).
    Returns:
        pathlib.Path: Path to the mp3 file for the given song ID.
    """
    return Path(audio_dir) / f"{song_id}.mp3"
|
7c9c00c5e89d9ee38cf461601718d2afe2a48160
| 56,716
|
def to_redis_key(aid: str) -> str:
    """Prefix the automation id *aid* with the redis namespace "a:".

    :param aid: The id of the automation
    :return: The automation's redis key prefix
    """
    return "a:{}".format(aid)
|
a9d38b5ba5ef1563fa2ba51a888eb3e8a0a4ba7b
| 56,717
|
def words(text):
    """Split *text* on runs of whitespace into a list of words.

    :param text: input string
    :return: list of words (empty list for an empty/whitespace-only string)
    """
    return text.split()
|
d1b225fc71d982445dae769738c2d46a4ea90bbc
| 56,720
|
def get_contiguous_set(numbers, search, length = 2):
    """Return a sorted contiguous run of at least *length* (default two)
    numbers from *numbers* whose sum equals *search*.

    Windows are tried shortest-first, left to right, matching the original
    recursive search order. Returns None when no contiguous run of any
    length sums to *search*.

    Fixes two defects in the previous version: the final window of each
    length was never examined (``range(len - length)`` off-by-one), and the
    recursion never terminated when no match existed.
    """
    for window in range(length, len(numbers) + 1):
        # every start index such that the window fits entirely in the list
        for index in range(len(numbers) - window + 1):
            contiguous_set = numbers[index: index + window]
            if sum(contiguous_set) == search:
                contiguous_set.sort()
                return contiguous_set
    return None
|
da622b4851a005e9c0a18041c20228acde101be7
| 56,721
|
def ndvi(nir, red):
    """Normalized Difference Vegetation Index: (NIR - RED) / (NIR + RED)."""
    numerator = nir - red
    denominator = nir + red
    return numerator / denominator
|
7d160212a4e91246b8fdd8e65b82aa52f14ca78c
| 56,730
|
def get_modal_scale(scale_notes, mode):
    """Return the scale notes starting from the modal root.

    Arguments:
        scale_notes -- list of Note objects making up the source scale
        mode -- 1-based int mode value (as in the mode_info dict)

    Note: only the notes from the modal root onward are returned; the
    preceding notes are not wrapped around to the end.
    """
    start = mode - 1
    return scale_notes[start:]
|
fe3702154b1f41944bf3c415aff7f73cd68b29a6
| 56,733
|
from typing import Iterable
from typing import Any
from typing import List
def dedupe(items: Iterable[Any], only_neighbors: bool = False) -> List[Any]:
    """Return *items* with duplicates removed, preserving first-seen order.

    When only_neighbors is True only consecutive repeats are collapsed,
    so a value may legitimately reappear later in the result.
    """
    result: List[Any] = []
    for element in items:
        if only_neighbors:
            if result and result[-1] == element:
                continue
        elif element in result:
            continue
        result.append(element)
    return result
|
ca12d0fbd267409a504b24e44f5f43b979cd7fb6
| 56,734
|
def learning_rate_with_decay(lr, global_step, discount_step, discount_factor):
    """
    Near-optimal step decay schedule (https://arxiv.org/abs/1904.12838):
    multiply lr by discount_factor on every step that is a positive
    multiple of discount_step, otherwise leave it unchanged.
    """
    if global_step > 0 and global_step % discount_step == 0:
        return lr * discount_factor
    return lr
|
d5d46f6033b315ed726adad2a23d0d81476552b6
| 56,735
|
def is_tty(file):
    """Return ``file.isatty()``; False when the object has no such method."""
    try:
        return file.isatty()
    except AttributeError:
        return False
|
dcafa53a1dda1d26de3d0dfae8f5decec7ccad1c
| 56,738
|
import logging
def create_root_logger(handler):
    """Attach *handler* to the root logger, with a standard timestamped
    format, and return that logger.

    Args:
        handler: The log handler to use (type `logging.Handler`).
    Returns:
        The root logger (`logging.Logger`).
    """
    log_format = '%(asctime)s.%(msecs)03d %(name)-15s %(levelname)-4s %(message)s'
    formatter = logging.Formatter(log_format, datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    root = logging.getLogger()
    root.addHandler(handler)
    return root
|
763d4bde4819f95b166fceb106c25e835746658b
| 56,739
|
def get_ct_feat_names(ct, other_names):
    """Combine ColumnTransformer feature names with other feature names.

    Note: written because the sklearn version on Kaggle during this
    competition lacked built-in support for this. NOTE(review): `ct` is
    currently unused — only `other_names` is returned.

    Parameters
    ----------
    ct : sklearn ColumnTransformer
        A fitted sklearn ColumnTransformer (presently ignored).
    other_names : list of str
        The other feature names to append.

    Returns
    -------
    names : list of str
        The combined list of feature names.
    """
    return list(other_names)
|
17bd8a3fd5f650e11facb141ba5c6358bf011b8a
| 56,743
|
def form_openid_url(url, username):
    """Substitute *username* for the '<username>' placeholder in *url*."""
    placeholder = '<username>'
    return url.replace(placeholder, username)
|
2a15e9612b38193adb7fcb20c3433a3d78e9b56d
| 56,745
|
def split_paths(path):
    """Split a YANG path like "/a/b" into its non-empty components.

    Args:
        path: A YANG path string specified as /a/b
    Returns:
        A list of path components (empty segments are dropped).
    """
    return [part for part in path.split("/") if part]
|
0da7f9ad301f8bf13ccb3f4b6e87d69320c3836c
| 56,746
|
def get_mutations(by_lineage):
    """
    Extract common mutations from feature vectors for each lineage.

    :param by_lineage: dict, return value from process_feed()
    :return: dict, common mutations by lineage — features present in at
             least half of that lineage's samples
    """
    from collections import Counter  # local import keeps the module header untouched

    result = {}
    for lineage, samples in by_lineage.items():
        if not samples:
            # previously this raised ZeroDivisionError; no samples -> no common mutations
            result[lineage] = []
            continue
        # count how many samples carry each feature (diffs are lists -> hashable tuples)
        counts = Counter(tuple(diff) for sample in samples for diff in sample['diffs'])
        # keep mutations that occur in at least half of the samples
        threshold = len(samples) / 2
        result[lineage] = [feat for feat, count in counts.items() if count >= threshold]
    return result
|
1f4d2ff48013ad7a4d9f998fdd3a905e94c87b13
| 56,750
|
def sock_files(dev_uids_list, is_spp_pri=False):
    """Return list of socket files on host and containers.

    The socket file names follow the conventional ones described in the
    DPDK docs (any name would actually work). For spp_primary the sock
    path differs slightly because it is shared among other SPP processes.

    Example with two vhost devices:
        [vhost:0, vhost:1]
        => [
            {'host': '/tmp/sock0, 'guest': '/var/run/usvhost0'},
            {'host': '/tmp/sock1, 'guest': '/var/run/usvhost1'}
        ]
    """
    vhost_host_tmpl = '/tmp/sock{:s}'
    vhost_guest_tmpl = '/var/run/usvhost{:s}'
    memif_host_path = '/tmp/spp-memif.sock'
    memif_guest_path = '/var/run/spp-memif.sock'

    entries = []
    memif_done = False
    for dev_uid in dev_uids_list:
        parts = dev_uid.split(':')
        dev_type = parts[0]
        if dev_type == 'memif':
            # A single sock file suffices for memif: it is only used for
            # negotiation between the master and slave processes.
            if memif_done:
                continue
            guest = memif_host_path if is_spp_pri else memif_guest_path
            entries.append({'host': memif_host_path, 'guest': guest})
            memif_done = True
        elif dev_type == 'vhost':
            host = vhost_host_tmpl.format(parts[1])
            guest = host if is_spp_pri else vhost_guest_tmpl.format(parts[1])
            entries.append({'host': host, 'guest': guest})
    return entries
|
3531a60ebe31026f60445f7b94e00fe7b1b7163d
| 56,751
|
def annual_edition_for(title, notice):
    """Annual editions are published for different titles at different
    points throughout the year. Find the 'next' annual edition for the
    given title and notice (comparison works because effective_on is an
    ISO yyyy-mm-dd string)."""
    # publication month depends on the title number
    thresholds = ((16, '01'), (27, '04'), (41, '07'))
    month = next((m for limit, m in thresholds if title <= limit), '10')
    notice_year = int(notice['effective_on'][:4])
    cutoff = '%d-%s-01' % (notice_year, month)
    return notice_year if notice['effective_on'] <= cutoff else notice_year + 1
|
ab19e1403349cd0d20833938f0b6673ca048c2d9
| 56,752
|
from typing import Type
from enum import Enum
from typing import List
def get_enum_names(enum_cls: Type[Enum]) -> List[str]:
    """
    Collect the names of all members of the Enum class *enum_cls*.

    :param enum_cls: Type of the enum to retrieve the names for
    :return: list of Enum member names, in definition order
    """
    names = []
    for member in enum_cls:
        names.append(member.name)
    return names
|
bb2f9dc5d9ae3d45a2c6b17be61c79688dd45e0f
| 56,756
|
import json
def to_gql_input(ob) -> str:
    """Render plain Python objects as GraphQL input syntax.

    >>> to_gql_input({"a": {"b": 1, "c": True, "d": [4.2, 3.4], "e": "e"}})
    '{a: {b: 1, c: true, d: [4.2, 3.4], e: "e"}}'
    """
    if isinstance(ob, dict):
        pairs = (f"{key}: {to_gql_input(val)}" for key, val in ob.items())
        return "{" + ", ".join(pairs) + "}"
    if isinstance(ob, list):
        return "[" + ", ".join(map(to_gql_input, ob)) + "]"
    if isinstance(ob, (str, int, float, bool)):
        # JSON scalar rendering matches GraphQL for these types
        return json.dumps(ob)
    raise ValueError("Cannot format %r" % ob)
|
7a8bc2e78c1781017213c0143f9656d2c55eca81
| 56,759
|
import re
def generate_trigrams(text: str) -> set:
    """
    Return a set of trigrams in a given text.

    Preprocessing: surrounding whitespace is stripped, each internal space
    is doubled, then two spaces are prepended and one space appended — so
    every word gets the same two-space leading boundary.

    (The previous version prepended only one space, contradicting this
    contract and dropping the first boundary trigram of the text.)

    ލިޔެފައިވާ ބަހުގެ ނުވަތަ ބަސްތަކުގެ ޓްރައިގްރާމްތައް ސެޓެއްގެ ގޮތުގައި އަނބުރާ ދޭނެއެވެ
    >>> sorted(generate_trigrams("ab"))
    ['  a', ' ab', 'ab ']
    """
    text = text.strip().replace(" ", "  ")
    text = f"  {text} "
    return {text[i: i + 3] for i in range(len(text) - 2)}
|
5678993585a0f3e42a6e4212cef6e4b5080ee23e
| 56,761
|
def sym_install(sources, symlinks):
    """Generate "dosym" commandlines, one per (source, symlink) pair.

    Args:
        sources: A list of source files of symbolic links.
        symlinks: A list of symbolic links to be created.
    Returns:
        A list of commandlines: [['dosym', src, link], ...]
    """
    assert len(sources) == len(symlinks), ('the number of symlinks must be the'
                                           ' same as sources')
    commands = []
    for src, link in zip(sources, symlinks):
        commands.append(['dosym', src, link])
    return commands
|
6d0360a2a9ad8d594723d4e9b487d357d621b7b4
| 56,763
|
def dict_to_fun(dictionary):
    """
    Wrap a function around a dictionary.

    Parameters
    ----------
    dictionary: a dict (callables are passed through unchanged)

    Returns
    -------
    f: a function such that f(a, b, c, ...) == dictionary[(a, b, c, ...)]
    """
    if callable(dictionary):
        return dictionary

    def lookup(*keys):
        return dictionary[keys]

    return lookup
|
6c4afe1d781f16a16820058330fe647ac91ad702
| 56,765
|
def from_size(n):
    """Create an *n*-component vector clock with every entry zeroed."""
    return tuple(0 for _ in range(n))
|
64de5493f441c077f63d5416108dd8962e932a9b
| 56,769
|
def new_chr(pos):
    """Return the pos-th lowercase letter of the alphabet (1 -> 'a', 26 -> 'z')."""
    return chr(ord('a') - 1 + pos)
|
51f1df6af71aa7593dc7b22e08b1ba97c536ed67
| 56,771
|
def friendly_channel_type(channel_type: str) -> str:
    """Return a human readable version of the channel type."""
    labels = {
        "controlled_load": "Controlled Load",
        "feed_in": "Feed In",
    }
    # anything unrecognized is treated as a general channel
    return labels.get(channel_type, "General")
|
bb9eb62effd5008950860ec10d6fa272cafc9e7c
| 56,772
|
def parse_float(float_str, default=None):
    """Parse a formatted number string.

    Thousands separators (commas) are dropped and every '-' character is
    replaced with the digit 0 (so a bare '-' placeholder parses as 0.0).
    Returns *default* when the cleaned string is not a valid float.
    """
    cleaned = float_str.replace(',', '').replace('-', '0')
    try:
        return float(cleaned)
    except ValueError:
        return default
|
1c97b37756e2cd702a5fe83f83654026745b4790
| 56,775
|
def tsg_count(driver_dataframe):
    """Count distinct tumor suppressor genes (driver_role == "TSG") in the
    final driver gene dataframe."""
    tsg_rows = driver_dataframe[driver_dataframe["driver_role"] == "TSG"]
    unique_tsg = tsg_rows[["gene", "driver_role"]].drop_duplicates()
    return len(unique_tsg.index)
|
9f72868250ba4b4ddb4efa9a922fcca8948e8d03
| 56,779
|
import math
def calc_rmsd(msd):
    """Calculate RMSD from a given MSD; negative input yields 0.0."""
    return math.sqrt(msd) if msd >= 0.0 else 0.0
|
a1908bb2270b30f0c7a085f729c4c68fa1ee90a4
| 56,781
|
def MER2unit(MER, M63=0.1):
    """Solve for unit mass — comparable to HYSPLIT output.

    Inputs: MER in kg/s; M63, the mass fraction of fine ash (< 63um per
    Mastin et al. 2009 — this fraction is highly variable and poorly known).
    Output: (unit mass in grams, unit mass of fine ash in grams).
    Assumes model output is one unit mass per hour (hence the MER conversion).
    """
    grams_per_hour = MER * 3600 * 1000  # kg/s -> g/h
    return grams_per_hour, grams_per_hour * M63
|
385d165a130b1febd59ae83095911c8fdf2adf5c
| 56,785
|
def users_data(db):
    """User data fixture: two test accounts sharing the same password."""
    emails = ('user1@inveniosoftware.org', 'user2@inveniosoftware.org')
    return [{'email': email, 'password': 'pass1'} for email in emails]
|
52506489392cf7eb710d69cf3edf82d0e798d1ae
| 56,786
|
def is_contained(e1, e2):
    """ Checks if regex match e2 is contained (overlapping) in regex match e1
    ex :
    e1 = (0, 10, "cde abc ghf") and e2 = (4, 6, "abc")
    e2 is contained in e1 (shown in the start/end indices of the match)
    Each tuple is (match_start_index, match_end_index, string_matched)
    :param e1 : regex-matched element
    :type e1 : tuple
    :param e2 : regex-matched element
    :type e2 : tuple
    :return : True if e2 is contained in e1
    :rtype : bool
    """
    # e2 is inside e1 iff e1 starts at or before e2 AND ends at or after e2.
    # (The previous expression `e1[0] < e2[1] and e1[1] < e2[1]` required e1
    # to END before e2 ends, which is the opposite of containment and failed
    # the docstring's own example.)
    return e1[0] <= e2[0] and e2[1] <= e1[1]
|
229958c52b927040438a73b6bd64c644391eb3b0
| 56,788
|
def is_dead(player):
    """Return True when the player's 'Hit Points' have dropped to zero or below."""
    hit_points = player['Hit Points']
    return hit_points <= 0
|
6af8a50dabe6a68eebe3227f8651963d24d1a385
| 56,789
|
def decompose_timedelta(duration):
    """
    Decompose a duration (timedelta) object into (hours, minutes, seconds),
    where hours includes the duration's whole days (25h for 1 day + 1 hour).

    Fixes the previous computation, which added ``days * 24`` (hours) to the
    raw second count before dividing by 3600 — collapsing whole days into
    almost nothing for any duration of a day or more.
    """
    days, seconds = duration.days, duration.seconds
    hours = days * 24 + seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    return hours, minutes, seconds
|
99bb1c773fc7558a4f59fe95b3ee878b02a86771
| 56,791
|
import importlib
def _str_to_class(module_name, class_name):
"""
Return a class instance from a string reference
"""
class_instance = None
try:
module_instance = importlib.import_module(module_name)
try:
class_instance = getattr(module_instance, class_name)
except AttributeError:
pass
except ImportError:
pass
return class_instance
|
545f9447ade3b306e32c72f238977f5f9f6fa8e1
| 56,793
|
def centroid_decomposition(graph):
    """
    Given a tree, this function is a generator that
    1. Roots the tree at its centroid (modifying graph)
    2. Yields centroid
    3. Removes centroid from the graph
    4. Recurses on the forest left after the removal
    This generator makes working with centroid decomposition easy. It yields the n
    centroids involved in the decomposition. It also keeps the graph rooted at the yielded
    centroid by modifying the input variable graph. In total this takes O(n log n) time.
    Input:
        graph: list of lists where graph[u] is a list containing all neighbours of node u
    Example:
        >>> graph = [[1], [0,2], [1]]
        >>> for centroid in centroid_decomposition(graph):
        >>>     bfs = [centroid]
        >>>     for node in bfs:
        >>>         bfs += graph[node] # Valid since graph is rooted at the centroid
        >>>     print('BFS from centroid:', bfs)
        BFS from centroid: [1, 0, 2]
        BFS from centroid: [0]
        BFS from centroid: [2]
    """
    n = len(graph)
    # Root the tree at an arbitrary node (n - 1): walk the tree in BFS order
    # while deleting each child's back-edge to its parent, turning the
    # undirected adjacency lists into parent -> children lists.
    bfs = [n - 1]
    for node in bfs:
        bfs += graph[node]
        for nei in graph[node]:
            graph[nei].remove(node)
    # Compute subtree sizes bottom-up: reversed BFS order guarantees every
    # child is processed before its parent.
    size = [0] * n
    for node in reversed(bfs):
        size[node] = 1 + sum(size[child] for child in graph[node])
    def reroot_centroid(root):
        # Reroot the subtree currently rooted at `root` at its centroid by
        # repeatedly descending into any child whose subtree holds more than
        # half the nodes, flipping the parent/child edge and fixing sizes.
        N = size[root]
        while True:
            for child in graph[root]:
                if size[child] > N // 2:
                    # `child` is too heavy: make it the new root.
                    size[root] = N - size[child]
                    graph[root].remove(child)
                    graph[child].append(root)
                    root = child
                    break
            else:
                # No heavy child remains -> `root` is the centroid.
                return root
    # Decompose: for each pending subtree root, move its centroid to the top,
    # yield it, and queue the centroid's children (the roots of the forest
    # obtained by conceptually removing the centroid).
    bfs = [n - 1]
    for node in bfs:
        centroid = reroot_centroid(node)
        bfs += graph[centroid]
        yield centroid
|
12c5c58c171e75176361dc4f05bacff3e1c5c266
| 56,797
|
def encodenonascii(txt):
    """Replace every non-ASCII character in *txt* with its decimal HTML entity."""
    return ''.join(
        char if ord(char) < 128 else '&#%i;' % ord(char)
        for char in txt
    )
|
bb586878f7a73290961527e034a326fdcfbc317e
| 56,798
|
import math
def angle(a, b):
    """Angle of the vector pointing from b to a, in radians (atan2 convention)."""
    dy = a[1] - b[1]
    dx = a[0] - b[0]
    return math.atan2(dy, dx)
|
e632dcad47db37173802ddea965b14a37eef9cef
| 56,800
|
def signature_to_bytes(v: int, r: int, s: int) -> bytes:
    """
    Convert an ecdsa signature to bytes.

    :param v: recovery id (1 byte)
    :param r: signature r component (32 bytes)
    :param s: signature s component (32 bytes)
    :return: signature in form of {bytes32 r}{bytes32 s}{uint8 v}
    """
    return b''.join(
        component.to_bytes(width, byteorder='big')
        for component, width in ((r, 32), (s, 32), (v, 1))
    )
|
cab33c0214d4f45ecf4635380400447424177a49
| 56,802
|
def constant(c):
    """Build a function that ignores its argument and always returns *c*.
    Useful for testing.
    >>> f = constant(1)
    >>> f(0)
    1
    """
    return lambda _: c
|
e96fb09548201a43dc95255c36000350a102d3ab
| 56,806
|
def extract_reason(openfield):
    """
    Extract optional reason data from openfield.

    The openfield may carry extra ``key=value`` params after the first
    comma-separated element; the value of the ``reason`` key is returned,
    or "" when absent.

    :param openfield: str
    :return: str
    """
    if "," in openfield:
        # Only allow for 1 extra param at a time. No need for more now, but beware if we add!
        parts = openfield.split(",")
        parts.pop(0)
        for extra in parts:
            # partition instead of split: a segment without "=" (or a value
            # containing "=") previously raised ValueError
            key, _, value = extra.partition("=")
            if key == "reason":
                return value
    return ""
|
743ef1be5b4190bad156db15b4914e5c8e392404
| 56,807
|
def quote_string(v):
    """
    RedisGraph strings must be quoted: wrap *v* in double quotes when it is
    a string (non-strings are returned unchanged, existing quotes are kept).
    """
    if not isinstance(v, str):
        return v
    if not v:
        return '""'
    prefix = '' if v.startswith('"') else '"'
    suffix = '' if v.endswith('"') else '"'
    return prefix + v + suffix
|
8df72ccccbd65be7525d2cbe8780d8f878881c2c
| 56,808
|
def infer_free_values(A, b):
    """
    Infer the indices of the *free* values in an optimization vector, i.e.
    those not forced to a fixed value by the constraints.

    Parameters
    ----------
    A : np.ndarray
        The constraint matrix.
    b : np.ndarray
        The constraint values.

    Returns
    -------
    free : list
        Sorted list of indices that remain free. (Note: despite the name
        "fixed" in earlier docs, the returned list holds the FREE indices.)
    """
    # find locations of b == 0, since pmf values are non-negative, this means they are identically zero.
    free = [i for i, n in enumerate(A[b == 0, :].sum(axis=0)) if n == 0]
    while True:
        # now find rows of A with only a single free value in them. those values must also be fixed.
        fixed = A[:, free].sum(axis=1) == 1
        # for each such row, that single free index becomes fixed
        new_fixed = [[i for i, n in enumerate(row) if n and (i in free)][0] for i, row in enumerate(A) if fixed[i]]
        free = list(sorted(set(free) - set(new_fixed)))
        # iterate to a fixed point: stop once no new fixed index is found
        if not new_fixed:
            break
    return free
|
ba3313f96387c6e55a29220b748a61d226548c0f
| 56,809
|
def u(s, encoding="utf-8"):
    """Coerce a str/int/float/bytes value to a utf-8 (or *encoding*) string;
    any other type raises TypeError."""
    if isinstance(s, bytes):
        return s.decode(encoding)
    if isinstance(s, (str, int, float)):
        return str(s)
    raise TypeError(type(s))
|
e8bc7f798e96729ef618018dc32c5c458d76d9d8
| 56,811
|
import copy
def make_param_spec(param_spec, location):
    """Deep-copy *param_spec* and set its location ('in' property);
    the original spec is left untouched."""
    spec_copy = copy.deepcopy(param_spec)
    spec_copy.update({'in': location})
    return spec_copy
|
1d889efd2a79ab53c057224d7c7729abf3890b5f
| 56,812
|
def get_padding_value(padding, kernel):
    """Return the padding size for *kernel* given a padding mode
    ('valid' -> 0, 'same' -> kernel // 2, 'full' -> kernel - 1)."""
    rules = {
        "valid": lambda k: 0,
        "same": lambda k: k // 2,
        "full": lambda k: k - 1,
    }
    if padding not in rules:
        raise ValueError("accepted paddings are 'valid', 'same' or 'full', found " +
                         padding)
    return rules[padding](kernel)
|
8927486cd07e05dd4fb6897dab97f5f61cad7d32
| 56,814
|
def mirror_borders(tile, image):
    """ Mirror the border values for the tiles extending the image borders.

    For each of the four tile edges, the part of the tile that sticks out
    past the image extent (`diff` pixels wide) is filled with a mirrored
    copy of the adjacent in-image pixels. When a tile lies entirely inside
    the image, every `diff` is 0 and all slice assignments are empty no-ops.

    NOTE(review): tile.data appears to be indexed as [y, x, band] — the
    X-edge code slices axis 1 and the Y-edge code slices axis 0. Confirm
    against the tile type's definition.
    """
    # lower X-edge
    diff = max(0, image.offset.x - tile.offset.x)  # overhang width in pixels
    # write columns [0:diff) with the reversed in-image columns [diff:2*diff)
    tile.data[:, :diff, :] = tile.data[:, (2*diff - 1):(diff - 1):-1, :]
    # upper X-edge
    size = tile.size.x
    diff = max(0, (tile.offset.x + size) - (image.offset.x + image.size.x))
    # write the last `diff` columns, reversed, from the in-image columns just inside them
    tile.data[:, (size - 1):(size - diff - 1):-1, :] = (
        tile.data[:, (size - 2*diff):(size - diff), :]
    )
    # lower Y-edge
    diff = max(0, image.offset.y - tile.offset.y)
    tile.data[:diff, :, :] = tile.data[(2*diff - 1):(diff - 1):-1, :, :]
    # upper Y-edge
    size = tile.size.y
    diff = max(0, (tile.offset.y + size) - (image.offset.y + image.size.y))
    tile.data[(size - 1):(size - diff - 1):-1, :, :] = (
        tile.data[(size - 2*diff):(size - diff), :, :]
    )
    return tile
|
97fa29dd6b18cd52fdbe965e63505b55bd18189d
| 56,815
|
import re
def find_cite_keys(markdown):
    """
    Find the cite keys in the markdown text.

    Handles multiple keys inside a single bracketed reference.

    Args:
        markdown (str): the markdown text to extract citation keys from
    """
    pattern = re.compile(r"(\[(?:@\w+;{0,1}\s*)+\])")
    return list(pattern.findall(markdown))
|
9535710f43ab6fa6f5d2115428011aa54cb9fbee
| 56,817
|
def comma(member, memberList):
    """ Convenience function for processing comma lists: returns ',' for
    every member except the last one (identity comparison). """
    return '' if member is memberList[-1] else ','
|
98cdd8962db5be75a687948eb33a841e231258e2
| 56,825
|
from datetime import datetime
def convert_unix_to_date(utime):
    """Return a formatted UTC date-time string for a UNIX timestamp.

    Uses a timezone-aware conversion because ``datetime.utcfromtimestamp``
    is deprecated since Python 3.12; the output is identical.

    Parameters
    ----------
    utime : int
        UNIX timestamp, e.g. 1606488670

    Returns
    -------
    str
        "%Y-%m-%d %H:%M:%S" formatted UTC time

    Examples
    --------
    >>> convert_unix_to_date(1606488670)
    '2020-11-27 14:51:10'
    """
    from datetime import timezone  # local import: module import line unchanged
    ts = int(utime)
    return datetime.fromtimestamp(ts, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
|
1d4c35567ff3f18275b7d2a82e2109a224477f27
| 56,830
|
import string
def groner_repr(val: int, base: int = 10, use_standard_format: bool = True) -> str:
    """ Return string number representation with base from 2 to 36,
    using the standard prefix for binary/octal/decimal/hex when
    use_standard_format is True, or a custom "0[base]" prefix otherwise.

    Fixes the digit-extraction loop: the old ``while val > base`` skipped
    one division whenever the remaining value equalled the base, dropping
    a digit (e.g. 5 in base 5 rendered as "0[5]0" instead of "0[5]10").

    Keyword arguments:
    val -- int, to be converted
    base -- int, conversion base
    use_standard_format -- bool, include or not conversion base prefix
    """
    if type(val) != int or type(base) != int or type(use_standard_format) != bool:
        raise TypeError("Argument type error")
    if base < 2 or base > 36:
        raise ValueError("Base should be in range from 2 to 36")
    if use_standard_format and base in (2, 8, 10, 16):
        return {2: bin, 8: oct, 10: str, 16: hex}[base](val)
    digits = string.digits + string.ascii_uppercase
    is_negative = val < 0
    if is_negative:
        val = -val
    res = ''
    # peel off digits least-significant first; >= ensures exact multiples
    # of the base still produce their leading digit
    while val >= base:
        res += digits[val % base]
        val //= base
    res += digits[val]
    res = f'0[{base}]' + res[::-1]
    return '-' + res if is_negative else res
|
a9cec13f892ed2f62226050588506d6bfc81c187
| 56,832
|
import toml
from pathlib import Path
def get_colordata(name):
    """
    Load color map data from the bundled setting file.

    Parameters
    ----------
    name : str
        Colormap name. Currently, ``Dark_rainbow``, ``rainbow2``, ``sls_SAOImage``,
        ``rainbow3``, ``rainbow_SAOImage``, ``b_SAOImage``, ``spectrum_Starlink``,
        and ``Hue_sat_value2`` are supported.

    Returns
    -------
    The color table entry for *name* from cmapdata.toml.
    """
    cmap_path = Path(__file__).parent / "colordata" / "cmapdata.toml"
    # context manager closes the file deterministically (the previous
    # `toml.load(open(...))` leaked the file handle until GC)
    with open(cmap_path) as stream:
        color = toml.load(stream)
    return color["table"][name]
|
42b86661b793cc9d1796cdb320efc448e8d752b2
| 56,833
|
from typing import Tuple
def unpack_href_location(href: str) -> Tuple[str, str, int, int]:
    """
    Decode an href of the form "location:<session>@<uri>#<row>,<col_utf16>"
    into (session_name, uri, row, col_utf16).
    """
    payload = href[len("location:"):]
    session_name, uri_with_fragment = payload.split("@")
    uri, fragment = uri_with_fragment.split("#")
    row_str, col_str = fragment.split(",")
    return session_name, uri, int(row_str), int(col_str)
|
b0f24c16ab1c6cc47900468abf8a5590d054a79c
| 56,837
|
def remove_subtree(doc, clean_sent, indices_to_remove_subtree):
    """
    Remove a subtree's text from a sentence.

    The token indices delimit a spaCy Span (doc[start:end], where end is the
    first token AFTER the span); the span's text is deleted from clean_sent.

    Args:
        doc: spaCy Doc of the clean sentence
        clean_sent: str, the clean sentence corresponding to doc
        indices_to_remove_subtree: list of token indices of the subtree
    Return:
        str: the newly trimmed sentence
    """
    start = indices_to_remove_subtree[0]
    end = indices_to_remove_subtree[-1] + 1
    subtree_text = doc[start:end].text
    return clean_sent.replace(subtree_text, "")
|
67c358c87d5c9df2a613e59725e69087559142b0
| 56,838
|
def bib_keys(config):
    """Return the set of all citation keys, empty when no bib data is loaded."""
    if "bib_data" in config:
        return {entry["ID"] for entry in config["bib_data"]}
    return set()
|
a5cb0b2d7ad0389fbadcf3b82a0ea24122faf32a
| 56,840
|
def remove_wws_styles(font_name : str):
    """
    Remove font WWS (weight, width, and slope) styles from `font_name`.

    Arguments:
        font_name (str): The font name from which the styles will be removed.
    """
    # order matters: compound names (ExtraLight/SemiBold/ExtraBold) must be
    # stripped before their substrings (Light/Bold)
    styles = ("Thin", "ExtraLight", "Light", "Regular", "Medium",
              "SemiBold", "ExtraBold", "Bold", "Black", "Italic")
    for style in styles:
        font_name = font_name.replace(style, "")
    return font_name.strip()
|
ad59fcfb85d23a138d374036e7296b78a899ec63
| 56,845
|
def img_crop(img, cx=89, cy=121, w=128, h=128):
    """
    Crop an image around a center point.

    :param img: image data, numpy array, shape = [height, width, RGB]
    :param cx: center pixel, x
    :param cy: center pixel, y
    :param w: width, even number
    :param h: height, even number
    :return: the cropped image
    """
    half_w = w // 2
    half_h = h // 2
    return img[cy - half_h: cy + half_h, cx - half_w: cx + half_w]
|
d994d6b39427109e702139c89aec2689bd5299ec
| 56,847
|
def get_node_attributes(H, name=None):
    """Get the node attributes for a hypergraph.

    Parameters
    ----------
    H : Hypergraph object
        The hypergraph to get node attributes from.
    name : string, optional
        Attribute name. If None, return the entire attribute dictionary.

    Returns
    -------
    dict
        If name is None, dict of attribute dicts keyed by node; otherwise
        dict of that attribute's value keyed by node (nodes lacking the
        attribute are omitted).

    See Also
    --------
    set_node_attributes
    set_edge_attributes
    get_edge_attributes
    """
    if name is not None:
        return {node: attrs[name]
                for node, attrs in H._node_attr.items()
                if name in attrs}
    return dict(H._node_attr)
|
51b110806efaf2809f6ed9b17aaba13aee6e4e8f
| 56,858
|
def lower(_, text):
    """Filter helper: return *text* with every letter lower-cased
    (the first argument — the filter context — is ignored)."""
    return text.lower()
|
b776bfe880206975624d6c2fd8de53544494546e
| 56,862
|
def lr_mark(line, lmark, rmark):
    """ read a string segment from line, which is enclosed between l&rmark
    e.g. extract the contents in parentheses
    Args:
        line (str): text line
        lmark (str): left marker, e.g. '('
        rmark (str): right marker, e.g. ')'
    Return:
        str: text in between left and right markers
    Note: assumes single-character markers (the slice starts at lidx + 1).
    The right marker is searched only AFTER the left one — previously it
    was searched from the start of the line, so an rmark occurring before
    lmark (or lmark == rmark, e.g. quotes) produced an empty slice.
    """
    lidx = line.find(lmark)
    assert lidx != -1
    ridx = line.find(rmark, lidx + 1)
    assert ridx != -1
    return line[lidx+1:ridx]
|
cce0afcabafeafc4fd496243e6ec3ddf475e15af
| 56,867
|
def extract_target_size(proc: str):
    """
    Extract a target size as a (width, height) tuple of ints from a string
    ending with two integers in parentheses, separated by a comma.
    """
    pieces = proc.strip(")").split("(")
    assert len(pieces) == 2
    dims = pieces[1].split(",")
    assert len(dims) == 2
    return (int(dims[0]), int(dims[1]))
|
4d2e4d904ce3a00116eac7f158058cf1c6d22e47
| 56,870
|
def get_core_dests(geopackage, buff, study_area, dests):
    """
    Select destinations inside a negative-buffered study area ("core").

    Parameters
    ----------
    geopackage : geopandas.GeoDataFrame
        the osm derived spatial data
    buff : int
        multiplier applied to the smaller extent dimension to get the
        buffer distance
    study_area : shapely.Polygon or shapely.MultiPolygon
        the study area boundary to negative-buffer
    dests : geopandas.GeoDataFrame
        the osm destinations or official destinations

    Returns
    -------
    destinations that fall within the core (negative-buffered) study area
    """
    # buffer distance scales with the smaller side of the data extent
    xmin, ymin, xmax, ymax = geopackage["geometry"].total_bounds
    smaller_extent = min(xmax - xmin, ymax - ymin)
    buffer_dist = buff * smaller_extent
    study_area_core = study_area.buffer(-buffer_dist)
    return dests[dests.within(study_area_core)]
|
3f10abc8759685dd903e79aa4fd7d175538aa660
| 56,871
|
def package(x):
    """
    Get the package in which x was first defined, falling back to its
    module; None when neither can be determined.
    """
    pkg = getattr(x, "__package__", None)
    if pkg:
        return pkg
    return getattr(x, "__module__", None)
|
326157fe80393f31f26c714cd362112c20b7417b
| 56,872
|
def split_edges(lower, upper, g):
    """Select the edges of the subgraph whose endpoints lie in [lower, upper).

    Parameters
    ----------
    lower, upper: limits of the calculation
    g: the original graph (edges are (x, y, v) triples)

    Returns
    -------
    the edges of the subgraph"""
    return [
        edge
        for edge in g.edges
        if lower <= min(edge[0], edge[1]) and max(edge[0], edge[1]) < upper
    ]
|
fcf415f18697fecfc4ba97d50259fd6c74d2447a
| 56,873
|
def _overall_stats(label, tuples):
"""
Computes overall accuracy; returns in 'Settings'-friendly format.
Args: tuples([(int, int)]) Each entry is (# correct, # total) label (str)
What to call this
Returns: (str, str) key, val of settings column to add
"""
n_correct = sum(tp[0] for tp in tuples)
n_total = sum(tp[1] for tp in tuples)
return 'OVERALL %s acc' % (label), '%d/%d (%0.2f%%)' % (
n_correct, n_total, (n_correct*100.0)/n_total)
|
1028fcf832b259eb1df4b7484d680f41d0ad0b32
| 56,877
|
def hamming(s1, s2):
    """Count the number of differing bits between two strings.

    Only the low 8 bits of each character pair are compared (byte-oriented),
    and comparison stops at the shorter string.
    """
    return sum(
        bin((ord(c1) ^ ord(c2)) & 0xFF).count("1")
        for c1, c2 in zip(s1, s2)
    )
|
02e6fe6dd1b0c44be3333956d31921b418f56413
| 56,879
|
def clamp(v, low, high):
    """clamp v to the range of [low, high]"""
    upper_bounded = min(v, high)
    return max(low, upper_bounded)
|
31ac1f2aecf644d62881df4d641309b52cfadf70
| 56,881
|
def start_criteria(date, flags):
    """Check the start criteria.

    Args:
        date (datetime): date of the current element
        flags (dict): list of flags
    Returns:
        bool: True when no 'from_date' flag is set, or when *date* is on or
        after it
    """
    if 'from_date' not in flags:
        return True
    return date >= flags['from_date']
|
6ed7b2d4e79eebddf37a5b81b30b52475e64d68e
| 56,883
|
def custom_media(context, file_field):
    """
    Template convenience tag: return file_field.url when the field is set,
    otherwise None — avoids littering templates with
    {% if model.image %}{{ model.image.url }}{% endif %} guards.
    """
    return file_field.url if file_field else None
|
c00ee4c10c74699a74e40f2be4e71a652dbde619
| 56,886
|
def scale(value, source_range, dest_range, *args):
    """
    Map *value* from `source_range` onto `dest_range`.\
    Pass "invert" to flip the mapping, "clamp" to limit the output to the
    destination range. Degenerate (zero-width) ranges yield dest_range[1].
    """
    src_span = source_range[1] - source_range[0]
    dst_span = dest_range[1] - dest_range[0]
    if src_span == 0 or dst_span == 0:
        return dest_range[1]
    try:
        ratio = (value - source_range[0]) / src_span
        if "invert" in args:
            ratio = 1 - ratio
        result = ratio * dst_span + dest_range[0]
    except ZeroDivisionError:
        result = value * dst_span + dest_range[0]
    if "clamp" in args:
        return max(dest_range[0], min(dest_range[1], result))
    return result
|
e6ecf4b50a64da0b1e2231457b53067e2d3759ef
| 56,893
|
def slice_by_value(sequence, start=None, end=None, step=1):
    """Return the earliest slice of the sequence bounded by the first
    occurrences of the start and end VALUES. Omitted optional parameters
    work as in ordinary slicing; a missing value raises ValueError.

    slice_by_value('hello there world', 'o', 'w', 2) -> 'otee'
    """
    bounds = [None, None]
    for pos, marker in enumerate((start, end)):
        if marker is not None:
            bounds[pos] = sequence.index(marker)
    return sequence[bounds[0]:bounds[1]:step]
|
3e0e628de482377638fae463b5d92109cb3f76cb
| 56,894
|
def expand_addrmask(addrmask):
    """Expand a masked binary address into every concrete address it covers.

    Each "X" in *addrmask* is a floating bit that may be "0" or "1". For
    example "00X1010X" covers the binary strings "00010100", "00010101",
    "00110100" and "00110101" (decimal 20, 21, 52, 53).

    Note: the result is a list of binary *strings*, not decimal integers —
    the original docstring claimed decimal output, which the code never
    produced.

    Args:
        addrmask (str): string over the alphabet "0", "1", "X".

    Returns:
        list[str]: all matching binary address strings, substituting "0"
        before "1" at each floating position.
    """
    out = [""]
    for d in addrmask:
        # An "X" forks every partial address into a 0-branch and a 1-branch;
        # a fixed digit simply extends each partial address by itself.
        choices = "01" if d == "X" else d
        out = [prefix + bit for prefix in out for bit in choices]
    return out
|
696f110de8066ae7da27d6d8dba06285c7bc44b7
| 56,902
|
def find_winners(candidates, quota):
    """Return the candidates whose vote count meets or exceeds *quota*."""
    winners = []
    for candidate in candidates:
        if candidate.votes >= quota:
            winners.append(candidate)
    return winners
|
d37e82b64e59235b76095406c6a7ccee50435806
| 56,904
|
def is_blank(line):
    """Return True iff *line* is empty or contains only whitespace."""
    return not line.strip()
|
c67d51289d9e41fdcea75e78af80004902eeb245
| 56,905
|
def verification_run_events(base_verification_run_url: str) -> str:
    """
    Return the URL of the verification run event API, i.e. the base
    verification-run URL with an ``events`` path segment appended.
    """
    return f'{base_verification_run_url}/events'
|
895ffb070fb4f6071de6bbc5d8aebd199c497f2d
| 56,906
|
def perm_octal2str(perm_octal):
    """Convert octal permission int to permission string

    Args:
        perm_octal (int): octal-based file permissions specifier

    Returns:
        str: 9-character rwx------ style permission string built from the
        low nine bits (owner, group, other triads).
    """
    chars = []
    # Walk the triads MSB-first: owner (bits 8-6), group (5-3), other (2-0).
    for shift in (6, 3, 0):
        triad = (perm_octal >> shift) & 0o7
        for mask, flag in ((4, "r"), (2, "w"), (1, "x")):
            chars.append(flag if triad & mask else "-")
    return "".join(chars)
|
186063da337f7217baed6b3c1309c5a8e6ad1d70
| 56,907
|
def binary_1(number):
    """ Returns number of 1s in binary expansion of the number.

    Non-positive inputs yield 0 (the loop never runs for them).
    """
    ones = 0
    remaining = number
    while remaining > 0:
        # Booleans are ints in Python, so this adds 1 for each set low bit.
        ones += remaining % 2 == 1
        remaining //= 2
    return ones
|
36ac8c568753284ead00bff44ff82e74f966402d
| 56,913
|
def convertRange(val: float, old: tuple, new: tuple):
    """
    Linearly remap *val* from the interval *old* into the interval *new*.

    Example
    -------
    convertRange(50, (0, 100), (0, 1))
    >> 0.5
    """
    old_lo, old_hi = old[0], old[1]
    new_lo, new_hi = new[0], new[1]
    scaled = (val - old_lo) * (new_hi - new_lo)
    return scaled / (old_hi - old_lo) + new_lo
|
7308b6c45b3ef587aab41e970108a1dc9e31e726
| 56,919
|
def transpose(grid: list[list[str]]) -> list[list[str]]:
    """Transpose a rectangular grid (rows become columns).

    Preconditions:
        - grid != []
        - grid[0] != []
        - all({len(row1) == len(row2) for row1 in grid for row2 in grid})
    """
    return [list(column) for column in zip(*grid)]
|
b9c416c3d52f0cc870aa3e9218c1b1e17619cd61
| 56,927
|
async def get_events(database):
    """Fetch every event, ordered by year then name.

    Args:
        database: async database handle exposing ``fetch_all``.

    Returns:
        list[dict]: one mapping per event row (id, name, year).
    """
    query = 'select id, name, year from events order by year, name'
    rows = await database.fetch_all(query)
    return [dict(row) for row in rows]
|
d4c722ea329ee46ce0f428adc7e4f6fef3d7365f
| 56,929
|
def is_string(obj):
    """Return True if every element of the iterable *obj* is a string.

    NOTE(review): despite the name, this does NOT test ``isinstance(obj, str)``.
    It iterates *obj* and checks each element: a str argument returns True
    (each character is a str), an empty iterable returns True, and a
    non-iterable argument raises TypeError. Confirm callers expect
    element-wise semantics.
    """
    return all(isinstance(elem, str) for elem in obj)
|
cd8c4436796b73e304d7b4ac346f30e1783eee1b
| 56,930
|
def run(result):
    """Test stub that always reports failure; *result* is ignored."""
    return False
|
e48f03a8b6a603036f99493f1b63e0c50f71d9b6
| 56,931
|
def get_version_from_arguments(arguments):
    """Validate the arguments passed to `nox -s release`.

    Returns the single argument when it looks like a MAJOR.MINOR.PATCH
    version string; otherwise returns None.
    """
    if len(arguments) != 1:
        return None
    candidate = arguments[0]
    segments = candidate.split(".")
    # Require exactly three dot-separated, purely numeric segments.
    if len(segments) == 3 and all(seg.isdigit() for seg in segments):
        return candidate
    return None
|
b4d6cab975b272adddd8e814f90ec4b1c6cb39f5
| 56,932
|
from pathlib import Path
def is_project_package(path):
    """True when *path* looks like the project's base package directory,
    i.e. it contains both an ``__init__.py`` and a ``package.py`` file."""
    base = Path(path)
    required = ('__init__.py', 'package.py')
    return all((base / name).is_file() for name in required)
|
e3825c61b75731ac2fa53a57c622acd5262ec084
| 56,933
|
def format_time(time):
    """Render a duration in seconds as a human-readable hours/minutes/seconds string."""
    total = round(time)
    h = total // 3600
    leftover = total % 3600
    m = leftover // 60
    s = leftover % 60
    return 'Hours: %s, Minutes: %s, Seconds: %s' % (h, m, s)
|
4cd001edd497af5d6b5e2971ca77403a5caa2af8
| 56,941
|
def make_adder(n):
    """Build and return a closure that adds *n* to its single argument."""
    def add_n(val):
        # n is captured from the enclosing call's scope.
        return val + n
    return add_n
|
68b114c3d33f189c747f8caec9d5fce8910c388b
| 56,942
|
def _centroid(gen):
"""Find the centroid of the coordinates given by the generator. The
generator should yield pairs (longitude, latitude).
:return: Pair (longitude, latitude) of the centroid.
"""
n, lon, lat = 0, 0, 0
for pair in gen:
lon += pair[0]
lat += pair[1]
n += 1
if n == 0:
raise ValueError("No points found to compute centroid from")
return lon / n, lat / n
|
27f4df5257023ce4f6f286bc3b8f7b0206c6f61b
| 56,946
|
def arg_range(arg, lo, hi):
    """
    Parse a string of the form `[int][-][int]` into the inclusive range of
    integers it denotes. Open interval ends are capped at *lo* / *hi*. An
    empty argument or a bare `-` yields the full `[lo, hi]` range; a plain
    integer with no `-` yields a one-element list.
    """
    text = arg.strip()
    if text in ('', '-'):
        return range(lo, hi + 1)
    if '-' not in text:
        return [int(text)]
    left, _, right = text.partition('-')
    first = int(left) if left.strip() else lo
    last = int(right) if right.strip() else hi
    return range(first, last + 1)
|
77dda7ac1fb3ad3f05aa50ac01045565e0bb426b
| 56,952
|
def write_query(seqdict, queryout):
    """Write *seqdict* (gene -> amino-acid sequence) to *queryout* as FASTA.

    Records are buffered and flushed every 10000 entries to bound memory;
    a final flush writes whatever remains (an empty dict still emits a
    single trailing newline via the last print).
    """
    print('Creating amino acid query file.')
    pending = []
    with open(queryout, 'w') as handle:
        for name, seq in seqdict.items():
            pending.append('>{}\n{}'.format(name, seq))
            if len(pending) == 10000:
                print('\n'.join(pending), file=handle)
                pending.clear()
        print('\n'.join(pending), file=handle)
    return None
|
2d09715cbb6cdc01af0a72e8266c580cc1b07c54
| 56,954
|
def all(iterable):
    """Return True if every element of *iterable* is truthy (True if empty).

    Backport shim: ``all`` is a builtin in Python 2.5+ but absent in 2.4.
    Short-circuits on the first falsy element.
    """
    for truthy in map(bool, iterable):
        if not truthy:
            return False
    return True
|
45cba24978c9966830d7ffb6272dba10aadfd5c5
| 56,960
|
def impute(data, scale=0.5):
    """
    Replace zeros in each row with *scale* times that row's smallest
    strictly-positive value.

    Args:
        data (pandas.DataFrame ~ (num_samples, num_genes))
        scale (optional; float)

    Returns:
        imputed data (pandas.DataFrame ~ (num_samples, num_genes))
    """
    # Per-row minimum over positive entries only (non-positive values are
    # masked to NaN by the boolean indexing, so min ignores them).
    row_fill = scale * data[data > 0].min(axis=1)
    filled = data.fillna(0)
    zero_mask = filled == 0
    return filled + zero_mask.multiply(row_fill, axis=0)
|
38b50357c2e0bdc429dd2f3fbcf770dac5b20c83
| 56,968
|
from functools import reduce
def fuzzy_or(values):
    """
    Combine *values* with the fuzzy-or (probabilistic sum) operator.

    >>> fuzzy_or([0.5])
    0.5
    >>> fuzzy_or([0.5, 0.5])
    0.75
    >>> fuzzy_or([0.5, 0.5, 0.5])
    0.875
    """
    if min(values) < 0 or max(values) > 1:
        raise ValueError("fuzzy_or expects values in [0,1]")
    def _pair_or(left, right):
        # 1 - (1-l)(1-r): the probabilistic sum of two membership values.
        return 1 - (1 - left) * (1 - right)
    return reduce(_pair_or, values)
|
0015bb1d13502d55c485ce665606682c9254e578
| 56,969
|
def decomposeQuadraticSegment(points):
    """Split the quadratic curve segment described by 'points' into a list
    of "atomic" quadratic segments.

    'points' must be a sequence of length >= 2 of (x, y) coordinates: all
    off-curve points followed by the final on-curve destination point.
    The start point is not supplied. Consecutive off-curve points imply an
    on-curve point at their midpoint.

    Returns a list of (pt1, pt2) tuples, each a plain quadratic bezier
    segment (off-curve control point, on-curve end point).
    """
    n = len(points) - 1
    assert n > 0
    segments = []
    # Pair each off-curve point with its successor; the implied on-curve
    # point sits halfway between them.
    for off_pt, next_pt in zip(points[:-2], points[1:-1]):
        implied = (0.5 * (off_pt[0] + next_pt[0]), 0.5 * (off_pt[1] + next_pt[1]))
        segments.append((off_pt, implied))
    segments.append((points[-2], points[-1]))
    return segments
|
5d96c5ab5dfaab6d9bf8602db47acfc1730f5e25
| 56,970
|
def insertion_sort(lis):
    """Sort *lis* in place (ascending) with insertion sort and return it.

    Each element is swapped leftwards until it reaches its sorted position.

    Args:
        lis (list): mutable sequence of mutually comparable items.

    Returns:
        list: the same list object, now sorted.
    """
    for i in range(1, len(lis)):
        j = i
        # Guard j > 0 BEFORE the element comparison: the original tested
        # lis[j] < lis[j - 1] first, which at j == 0 compared against
        # lis[-1] (the last element) via negative indexing before the
        # guard terminated the loop — harmless here only by accident.
        while j > 0 and lis[j] < lis[j - 1]:
            lis[j], lis[j - 1] = lis[j - 1], lis[j]
            j -= 1
    return lis
|
3ccd0538d68e59045fffb4af8f6b7798cbd00a9f
| 56,975
|
def hex_to_rgb(hex):
    """Convert a hex color string (with or without a leading '#') to an RGB tuple."""
    digits = hex.lstrip('#')
    total = len(digits)
    step = total // 3
    return tuple(int(digits[pos:pos + step], 16) for pos in range(0, total, step))
|
25113cba6a3b677996aa7ef521f46cb27a3ab2dd
| 56,981
|
def get_box_num(col_num, row_num):
    """Return the 3x3 box index (0-8) containing the given column and row."""
    box_row = int(row_num / 3)
    box_col = int(col_num / 3)
    return box_row * 3 + box_col
|
677df8d6d625b98e2ee1dc83467e6a43ebf88d6e
| 56,985
|
def create_answer_space(x_coord, y_coord, radius, entry, variable, value, black_pixels):
    """Build the dict describing one answer space (a bubble on the form)."""
    return {
        "x": x_coord,
        "y": y_coord,
        "radius": radius,
        "entry": entry,
        "variable": variable,
        "value": value,
        "black_pixels": black_pixels,
    }
|
ba381d40811e4f152cf1fbc82404d51ec554fe58
| 56,989
|
def breaks(n=0):
    """Return a string of *n* newline characters (empty for n <= 0)."""
    return n * "\n"
|
d16e44bf7deed76a5dd38539cdf68bad4db6f72f
| 56,997
|
def get_string_name_from_df(column_name, row, index, dataframe):
    """
    Return the value of *column_name* for this row, re-read from the
    DataFrame when iteration has coerced it to a non-string.

    In case a name in a row has the form of a datetime, it is automatically
    converted to a date string, which is not what we want.

    Parameters
    ----------
    column_name: str
        Name of the column to extract
    row: Series
        Row from the dataframe from iterrows
    index:
        Index label of *row*, used to look the value up again in *dataframe*
    dataframe: DataFrame
        Full DataFrame the row was taken from via iterrows

    Returns
    -------
    str:
        Name obtained from the column
    """
    col_str = row[column_name]
    if not isinstance(col_str, str):
        # in case a name has the form of a date/time, it is automatically
        # converted by iterrows. Re-read the original value from the frame.
        col_str = dataframe.loc[index, column_name]
    return col_str
|
434d391f38aa27c427d32340a8ab9e3da11dc24a
| 56,999
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.