content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def clusters_to_pixel_set(clusters):
    """
    Flatten a clustering into the set of all significant pixels.

    Parameters
    ----------
    clusters : list of list of tuple
        Each inner list is one cluster; each (i, j) tuple is the position of
        a significant point belonging to that cluster.

    Returns
    -------
    set of tuple
        Every (i, j) position that appears in any cluster.
    """
    pixels = set()
    for cluster in clusters:
        pixels.update(cluster)
    return pixels
|
289f43c30416a4ab626c88bad17d6ad2e7594b3e
| 59,491
|
import socket
def ipfind(target_ip):
    """Resolve a host name to its IPv4 address string via DNS lookup."""
    return socket.gethostbyname(target_ip)
|
3656d3e9073a34756ac1a53b9481815409be1636
| 59,503
|
import json
def _json_to_dict(json_path):
"""
Parse JSON file content.
Parameters
----------
json_path : str or pathlib.Path
Path to JSON file.
Returns
-------
dict
Dynophore data with the following keys and nested keys (key : value data type):
- id : str
- ligand_name : str
- ligand_smiles : str
- ligand_mdl_mol_buffer : str
- ligand_atom_serials : str
- superfeatures : list
- id : str
- feature_type : str
- atom_numbers : list of int
- occurrences : list of int
- envpartners : list
- id : str
- name : str
- atom_numbers : list of int
- occurrences : list of int
- distances : list of float
"""
with open(json_path, "r") as f:
json_string = f.read()
dynophore_dict = json.loads(json_string)
return dynophore_dict
|
a269dbcbeaee0397de37f4f57583beabcc7cab26
| 59,506
|
def extractLocatorFromCommand(command):
    """Given a command line that starts a RAMCloud server, return the
    service locator passed via its ``-L`` option.

    Raises ValueError if no ``-L`` token is present.
    """
    parts = command.split()
    # The locator is the token immediately following '-L'.
    return parts[parts.index('-L') + 1]
|
f4371b51b36df5c7bceb472d9f57d1b528005990
| 59,507
|
import re
def remove_comments(mech_str):
    """Remove ``!``-style comments from a CHEMKIN file string.

    :param mech_str: CHEMKIN file contents
    :type mech_str: str
    :returns: comment-free CHEMKIN file string
    :rtype: str
    """
    # '!' starts a comment that runs to the end of the line.
    return re.sub('!.*', '', mech_str)
|
79c0f464c44cd4431600dafad63d69a3713a3e53
| 59,508
|
def get_linestyle(experiment):
    """Select a matplotlib linestyle for the given experiment name."""
    # Only the AA+GHG linear-combination experiment is drawn dotted.
    return 'dotted' if experiment == 'linear combination: AA, GHG' else '-'
|
42d4c7d74c6eb313113d05661bb9cbaafbf738c5
| 59,510
|
import random
def shuffle(inp: str, seed=None) -> str:
    """Return a new string with the characters of *inp* in random order.

    :param inp: String to shuffle
    :param seed: random seed passed to random
    :return: str, a new string consisting of inp randomly shuffled

    >>> shuffle('abcdefghij', seed=3.1415)
    'gdjhfcebia'
    """
    # Seed the module-level RNG so results are reproducible for a given seed.
    random.seed(seed)
    chars = list(inp)
    random.shuffle(chars)
    return ''.join(chars)
|
5ffecf4df930601b71bc21f38150b1c132ac6dcd
| 59,513
|
def has_conflicting_info(passages, qa_answers, nlg_answers):
    """
    Checks whether an example has conflicting information
    regarding its answerability.

    Args:
        passages: list[{"is_selected": int, "passage_text": str}]
        qa_answers: list[str]
        nlg_answers: list[str]
    Returns:
        bool: True when answer availability and passage relevance disagree.
    """
    has_rel_passage = sum(p_info["is_selected"] for p_info in passages) != 0
    qa_avail = (qa_answers != ['No Answer Present.']) and (qa_answers != [""])
    # BUG FIX: the old code compared the list against the string "[]", which
    # is always True for a list input; test for an actually-empty answer set
    # (accepting a stringified "[]" artifact as well).
    nlg_avail = nlg_answers != [] and nlg_answers != "[]"
    # there is at least one nlg answer but no qa answer
    if nlg_avail and not qa_avail:
        return True
    # there is at least one answer but no relevant passage
    if qa_avail and not has_rel_passage:
        return True
    # there is at least one relevant passage but no answer is available
    if has_rel_passage and not qa_avail:
        return True
    return False
|
32a50dadf5007fdf44a677a0e2c72d30e001ccae
| 59,519
|
def _CombineBitloading(bitlines):
"""Combine bitloading information into one string.
Args:
bitlines: a list of lines of bitloading info:
00008888888888888888888888888888 00008888888888888888888888888888
88888888888888888888888888888888 88888888888888888888888888888888
88888888888888888888888888888888 88888888888888888888888888888888
88888888888888888888000000000000 88888888888888888888000000000000
00000000000008888888888888888888 00000000000008888888888888888888
88888888888888888888888888888888 88888888888888888888888888888888
88888888888888888888888888888888 88888888888888888888888888888888
88888888888888888888888888888000 88888888888888888888888888888000
Returns:
a tuple of two contiguous strings, '00008888888...888888000',
for the left-hand and right-hand bitloading.
"""
left = []
right = []
for line in bitlines:
(l, r) = line.split()
left.append(l.strip())
right.append(r.strip())
return (''.join(left), ''.join(right))
|
2f6ad8f61f26f53dcf1be60a7e6ece065a72d15e
| 59,520
|
def search_words_in_text(text, keywords):
    """Given a list of keyword lines, return True if at least one line fully
    matches the text (case-insensitive substring match), else False.

    keywords format:
        keywords = [
            "word1 word2",
            "word3",
            "word4",
        ]
    meaning: (word1 AND word2) OR word3 OR word4
    """
    text = text.lower()
    # Normalize and dedupe the keyword lines.
    keywords = {str(k).lower() for k in keywords}
    for line in keywords:
        # `all(...)` directly — the former `True if w in text else False`
        # was a redundant anti-idiom.  All words of a line must appear.
        if all(w in text for w in line.split()):
            return True
    return False
|
fd4288f60cfc925a90beafc43de14e2b0afe1cfd
| 59,522
|
import re
def remove_entry_tag(text):
    """Strip inline markdown image tags of the form ``![](...)`` from *text*."""
    pattern = re.compile(r"!\[\]\(.*?\)")
    return pattern.sub("", text)
|
f83cdfa4d1de553e52bbecde76da054ffcb80ddb
| 59,525
|
import math
def _cdf(x, mu, sd):
"""Cumulative density function for normal distribution """
return (1.0 + math.erf((x - mu) / (sd * math.sqrt(2.0)))) / 2.0
|
2c6de6905c28d127b69d505a6646809b666a7550
| 59,526
|
from typing import Set
def has_disjoint(left: Set[str], right: Set[str]) -> float:
    """Return 1.0 if both sets are non-empty but share no values, else 0.0."""
    # Truthiness instead of len(); `left` is already a set, so the former
    # `set(left)` copy was redundant.
    if left and right and left.isdisjoint(right):
        return 1.0
    return 0.0
|
5bb291af57599b6cd489f20b1c5511a37bb727c3
| 59,532
|
def read_convergence(outfile):
    """
    Check for the four YES.
    0: did not converge
    1: forces and displacements converged
    2: forces converged
    """
    # Read the whole output file; convergence tables are small relative to it.
    with open(outfile) as f:
        lines = f.readlines()
    for n, line in enumerate(lines):
        # Header of the convergence table; the four lines that follow are the
        # individual convergence criteria (presumably max/RMS force then
        # max/RMS displacement, Gaussian-style — confirm against the producer).
        if 'Item               Value     Threshold  Converged?' in line:
            # The checks are nested so later lines are only inspected when the
            # earlier ones passed; the scan continues if any criterion fails.
            # NOTE(review): may raise IndexError if the header appears within
            # four lines of end-of-file — confirm whether that can happen.
            if 'YES' in lines[n+1]:
                if 'YES' in lines[n+2]:
                    if 'YES' in lines[n+3]:
                        if 'YES' in lines[n+4]:
                            # All four criteria converged.
                            return 1
                        else:
                            # Forces converged but displacements did not.
                            return 2
    return 0
|
1be1c95d1893c94b2eca0eee3618be2f87e07f8a
| 59,535
|
def every(predicate, iterable):
    """Return True when *predicate* holds for every element of *iterable*.

    >>> every(lambda x: x > 1, [2, 0, 3])
    False
    >>> every(lambda x: x >= 0, [2, 0, 3])
    True
    """
    for element in iterable:
        if not predicate(element):
            return False
    return True
|
17bdabbb414cce56295cda1029f0fa476bf55057
| 59,537
|
def linear_force_fn(zero_intercept,
                    slope,
                    apply_distant_force=False,
                    apply_nearby_force=True):
    """Force is a linear function of distance.

    Args:
        zero_intercept: Scalar. Magnitude of force between sprites a distance
            zero apart.
        slope: Scalar. Slope of linear change in force with respect to distance.
        apply_distant_force: Bool. Whether to apply force at distances greater
            than the event horizon (distance at which the force is zero).
        apply_nearby_force: Bool. Whether to apply force at distances less
            than the event horizon.  (Docstring fix: this parameter was
            previously documented twice as apply_distant_force.)

    Returns:
        force_fn: Function distance -> force magnitude.
    """
    # Distance at which the linear force crosses zero.
    event_horizon = -1. * zero_intercept / slope

    def force_fn(distance):
        force_magnitude = zero_intercept + slope * distance
        if not apply_distant_force and distance > event_horizon:
            force_magnitude = 0
        if not apply_nearby_force and distance < event_horizon:
            force_magnitude = 0
        return force_magnitude

    return force_fn
|
874386fe9514ca09eb200aa71197e360e6a77e13
| 59,540
|
def generate_server_args_str(server_args):
    """Create a server args string to pass to the DBMS.

    :param server_args: mapping of argument name -> value; booleans are
        rendered lowercase, ``None`` produces a bare flag with no value.
    :return: space-separated string such as ``-port=5432 -quiet``
    """
    server_args_arr = []
    for attribute, value in server_args.items():
        # Booleans must be rendered lowercase (true/false) for the DBMS.
        value = str(value).lower() if isinstance(value, bool) else value
        # `is not None` instead of `!= None`: identity check is the correct
        # idiom and is safe for values with custom __eq__.
        value = f'={value}' if value is not None else ''
        server_args_arr.append(f'-{attribute}{value}')
    return ' '.join(server_args_arr)
|
1a15df78312b5d60f75084dbb3dc42770a6d4d06
| 59,541
|
# NOTE: this shadows the builtin `slice` at module scope; inside this module
# the builtin is unreachable by name after this definition.
def slice(seq, start, stop=None, step=None):
    """ Returns the start:stop:step slice of the sequence `seq`. """
    # Defaults of None mean "to the end" / "step 1", matching seq[start:].
    return seq[start:stop:step]
|
66fb180bdc14a4e29757b297f1c48d00f55070c7
| 59,542
|
def no_decay(init, _final, _total_steps, _step):
    """
    Constant "decay" schedule: always return *init*.

    Parameters
    ----------
    init: float
        The value to return unchanged.
    _final: Any
        Ignored.
    _total_steps: Any
        Ignored.
    _step: Any
        Ignored.

    Returns
    -------
    float
        The `init` value.

    Notes
    -----
    Keeps the same four-parameter signature as the real decay functions so it
    can be used as a drop-in no-op schedule.
    """
    return init
|
b5559c7c1bfcf5d28f999d1bb2f2884b79f4710d
| 59,543
|
def create_policy(api, configuration, api_version, api_exception, policy_name):
    """ Creates a policy that inherits from the base policy.

    :param api: The Deep Security API modules.
    :param configuration: Configuration object to pass to the api client.
    :param api_version: The version of the API to use.
    :param api_exception: The Deep Security API exception module.
    :param policy_name: The name of the policy.
    :return: A PoliciesAPI object for the new policy.
    """
    # Build the new policy object.
    policy = api.Policy()
    policy.name = policy_name
    policy.description = "Inherits from Base policy"
    policy.detection_engine_state = "off"
    policy.auto_requires_update = "on"

    # Criteria that matches the Base Policy by name.
    criteria = api.SearchCriteria()
    criteria.field_name = "name"
    criteria.string_test = "equal"
    criteria.string_value = "%Base Policy%"
    criteria.max_results = 1

    # Look up the Base Policy so the new one can inherit from it.
    base_filter = api.SearchFilter(None, [criteria])
    policies_api = api.PoliciesApi(api.ApiClient(configuration))
    results = policies_api.search_policies(api_version, search_filter=base_filter)
    policy.parent_id = results.policies[0].id

    # Register the new policy with Deep Security Manager.
    return policies_api.create_policy(policy, api_version)
|
30b4e5d549aaa7a4f879eda2a4c55b663a614b89
| 59,544
|
def find_boyer_moore(T, P):
    """Return the lowest index of T at which substring P begins (or else -1).

    Classic Boyer-Moore search using the bad-character (last occurrence)
    heuristic only.
    """
    n, m = len(T), len(P)
    if m == 0:
        return 0  # the empty pattern matches at index 0
    # Last-occurrence table: character -> highest index at which it occurs in P.
    last = {ch: idx for idx, ch in enumerate(P)}
    # Align the end of the pattern at index m-1 of the text.
    i = m - 1  # index into T
    k = m - 1  # index into P
    while i < n:
        if T[i] != P[k]:
            # Mismatch: jump forward using the bad-character rule.
            j = last.get(T[i], -1)
            i += m - min(k, j + 1)
            k = m - 1
        elif k == 0:
            return i  # whole pattern matched, starting at i
        else:
            # Characters match: step both indices backwards.
            i -= 1
            k -= 1
    return -1
|
00508c6976a350fbe1c78830ab76346daf651d9d
| 59,547
|
import torch
def box_transform_inv(box, deltas):
    """
    Apply deltas to boxes to produce predicted boxes.

    Arguments:
        box -- tensor of shape (N, 4), boxes, (c_x, c_y, w, h)
        deltas -- tensor of shape (N, 4), deltas,
                  (σ(t_x), σ(t_y), exp(t_w), exp(t_h))

    Returns:
        pred_box -- tensor of shape (N, 4), predicted boxes, (c_x, c_y, w, h)
    """
    # Centers are shifted additively, sizes are scaled multiplicatively.
    center_x = box[:, 0] + deltas[:, 0]
    center_y = box[:, 1] + deltas[:, 1]
    width = box[:, 2] * deltas[:, 2]
    height = box[:, 3] * deltas[:, 3]
    # Stack the four (N,) columns back into an (N, 4) tensor.
    return torch.stack([center_x, center_y, width, height], dim=-1)
|
c2c0abe3ddd6ff68e32b5e194a22454883085fcc
| 59,549
|
def gps(s, x):
    """
    Calculate the maximum speed (distance units per hour) between adjacent
    samples in a list of cumulative distances taken *s* seconds apart.

    :param s: time in seconds between consecutive samples
    :param x: list of arbitrary distances
    :return: maximum speed, or 0 when fewer than two samples exist
    """
    # Return early if there are fewer than two samples to compare.
    if len(x) < 2:
        return 0
    # zip(x, x[1:]) pairs adjacent samples.  The loop variables no longer
    # shadow the parameter `x`, which the old comprehension did.
    speeds = [(3600.0 * (later - earlier)) / s
              for earlier, later in zip(x, x[1:])]
    return max(speeds)
|
8adb670508f2398886965bbd2940a76a3d92a79d
| 59,555
|
def find_root(prog_info):
    """
    Determine the base of the tower — the only program without a parent.
    """
    orphans = [prog for prog in prog_info.values() if prog.parent is None]
    return orphans[0]
|
67802b39e44923e9bec69ea480eab2934b063415
| 59,560
|
def ld2dl(d):
    """Convert a list of dicts into a dict of lists.

    Keys appear in order of first occurrence; each key maps to the list of
    values it took across the input dicts, in input order.  Falsy input
    (e.g. ``[]`` or ``None``) is returned unchanged.
    """
    if not d:
        return d
    result = {}
    for record in d:
        for key, value in record.items():
            result.setdefault(key, []).append(value)
    return result
|
0a47546c376048738070e3d6719485ce7ad5a98b
| 59,562
|
def cli_method(func):
    """Decorator that registers *func* as available to the CLI.

    Sets ``func.is_cli_method = True`` and returns the function unchanged,
    so it can be applied as a plain ``@cli_method`` decorator.
    """
    # (Removed dead commented-out setattr equivalent.)
    func.is_cli_method = True
    return func
|
0eb80215e37b7b9290732dc1a8a2a33bbfb647ed
| 59,565
|
def parse_initiator_log(log):
    """Parse the initiator log to get --path and --auth-token values for the
    responder.

    :param log: multi-line log text
    :return: dict with 'path' and 'auth-token' keys (None when not found)
    """
    path = None
    token = None
    for chunk in log.split('\n'):
        stripped_chunk = chunk.strip()
        # split() with no argument collapses runs of whitespace; the old
        # split(' ')[1] returned '' on double spaces and raised IndexError
        # when a flag appeared with no value.
        parts = stripped_chunk.split()
        if len(parts) < 2:
            continue
        if stripped_chunk.startswith("--path"):
            path = parts[1]
        elif stripped_chunk.startswith("--auth-token"):
            token = parts[1]
    return {'path': path, 'auth-token': token}
|
90947e2e1b768f7ba9ec3341ed1e13316adbe7b1
| 59,569
|
def version_specific(version, v3_subcon, v4_subcon):
    """
    Select the parsing structure matching the format version.

    There are some differences in the parsing structure between v3 and v4;
    this returns the relevant subcon for the given version.  Any version
    other than 3 is treated as version 4.

    :param version: int 3 or 4
    :param v3_subcon: the parsing struct if version is 3
    :param v4_subcon: the parsing struct if version is 4
    """
    return v3_subcon if version == 3 else v4_subcon
|
3f3a49e66f1d13707970756df02bc48064b7955a
| 59,572
|
def is_location_line(line):
    """Return True when this symbol-table line defines a symbol location."""
    # Location lines are identified by their literal 'Location' prefix.
    return line[:8] == 'Location'
|
fa002308b5d9e70a1a4d421e0aff9e9524ecd682
| 59,575
|
import math
def calculate_slider_step(
    min_value: float, max_value: float, steps: int = 100
) -> float:
    """Calculate a rounded step value for e.g. a dcc.RangeSlider() component.

    The resulting number of steps is at least the requested number, though not
    necessarily exactly equal due to the floor.  Needed because the underlying
    React component offers no precision control
    (https://github.com/react-component/slider/issues/275).
    """
    raw_step = (max_value - min_value) / steps
    # Round down to the nearest power of ten.
    return 10 ** math.floor(math.log10(raw_step))
|
36a842994050f523ab162f1f3c7603c0837c82f0
| 59,580
|
def complement_modifiers(char: str):
    """Swap the '/' and '|' sequence modifiers; any other char is unchanged."""
    swap = {"/": "|", "|": "/"}
    return swap.get(char, char)
|
2445da8478f82765ef884ab1819f5eb894ed106e
| 59,583
|
def centroid_of_rect(roi):
    """ Finds the coordinates of the centroid of a rectangular region.

    :param roi: rectangle region of interest as a matrix (row-major)
    :return: (row, column) of the centroid within the region
    """
    # Integer halves of the matrix dimensions.
    return roi.shape[0] // 2, roi.shape[1] // 2
|
2af9c44c4fbd64a2233fbff0c808d6dd462592aa
| 59,585
|
import socket
import getpass
import logging
def bind_socket(socket_name=None):
    """
    Find a unix socket to listen on and return it.

    When *socket_name* is given, bind exactly that path; otherwise probe
    numbered /tmp/pymux.sock.<user>.<i> paths until one binds, giving up
    after 100 attempts.

    Returns (socket_name, sock_obj).
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    if socket_name:
        sock.bind(socket_name)
        return socket_name, sock
    attempt = 0
    while True:
        candidate = '/tmp/pymux.sock.%s.%i' % (getpass.getuser(), attempt)
        try:
            sock.bind(candidate)
            return candidate, sock
        except (OSError, socket.error):
            attempt += 1
            # After 100 failures, give up and propagate the bind error.
            if attempt == 100:
                logging.warning('100 times failed to listen on posix socket. '
                                'Please clean up old sockets.')
                raise
|
4a0957b85fe63bc16101b4a9e1d61d013bac435e
| 59,587
|
from typing import List
from typing import Optional
def _find_priority(labels: List[str]) -> Optional[str]:
"""Find a priority value given label names."""
prio = {label: label for label in labels if label.startswith("priority")}
return (
prio.get("priority_critical")
or prio.get("priority_major")
or prio.get("priority_medium")
or prio.get("priority_low")
)
|
55e7071d0d1e7c8cee5677a872674b2a0cf39d32
| 59,589
|
from typing import Optional
from typing import Tuple
import time
def progress_data(start_time: Optional[float], iteration: Optional[int], total_iterations: Optional[int]) \
        -> Tuple[Optional[Tuple[int, int, int, int]], Optional[float]]:
    """
    Calculate elapsed time and progress percentage.

    :param start_time: Reference point for the elapsed-time calculation;
        "now" is taken from :func:`time.perf_counter`.
    :param iteration: Iteration index; 1 is added before the percentage is
        computed (indexes are assumed to start at 0).
    :param total_iterations: Total number of iterations.
    :return: ((hours, minutes, seconds, milliseconds) or None if no start
        time; percentage or None if iteration info is missing).
    """
    if start_time is None:
        elapsed = None
    else:
        total_seconds = time.perf_counter() - start_time
        # Fractional part of the seconds, expressed in milliseconds.
        millis = 1000.0 * (total_seconds - int(total_seconds))
        hours, remainder = divmod(total_seconds, 60 * 60)
        minutes, seconds = divmod(remainder, 60)
        elapsed = (int(hours), int(minutes), int(seconds), int(millis))
    if iteration is None or total_iterations is None:
        percent = None
    else:
        # Iterations are mostly indexes ranging over [0, total_iterations).
        percent = 100.0 * (iteration + 1) / total_iterations
    return elapsed, percent
|
6cb5bd295237057811e56bb30ae10b7f93797008
| 59,600
|
import json
def from_json(data, **kwargs):
    """Deserialize a JSON string.

    :param str data: JSON text to parse
    :param kwargs kwargs: keyword arguments forwarded to :func:`json.loads`
    :return: the parsed data
    :rtype: dict
    """
    parsed = json.loads(data, **kwargs)
    return parsed
|
17af33000478bbb364ffca7676d46eb3b0afccfa
| 59,601
|
import math
def direction_diff(direction_a, direction_b):
    """Smallest angle between two directions given in radians (result in [0, pi])."""
    delta = abs(direction_a - direction_b)
    # Wrap differences larger than a half turn around the circle.
    if delta >= math.pi:
        delta = 2 * math.pi - delta
    return delta
|
0e8547c2426ad0a3a32b1d33b7934998fb10572f
| 59,603
|
import importlib
def get_class_from_path(class_path):
    """
    Retrieve a class (or any attribute) from a dotted Python path string.

    For example, ``get_class_from_path('users.models.User')`` returns the
    ``User`` class, ready to instantiate or use in ``isinstance`` tests.
    """
    module_path, class_name = class_path.rsplit('.', 1)
    mod = importlib.import_module(module_path)
    return getattr(mod, class_name)
|
440e5a5ff8109a1cfccacc7f83fef50648c16b89
| 59,604
|
def write_buffer_to_file(buffer, filename):
    """Rewind *buffer* and write its full contents to *filename*.

    Returns the filename for convenient chaining.
    """
    buffer.seek(0)
    with open(filename, 'wb') as out:
        out.write(buffer.read())
    return filename
|
b4baa80f235fd8f7a65d9e144590021ed0971724
| 59,607
|
def cvtStokesToIntensity(img_stokes):
    """
    Convert a Stokes-vector image to an intensity image.

    Parameters
    ----------
    img_stokes : np.ndarray, (height, width, 3)
        Stokes vector image

    Returns
    -------
    img_intensity : np.ndarray, (height, width)
        Intensity image, i.e. half of the S0 component.
    """
    return 0.5 * img_stokes[..., 0]
|
4db57353810c8dc232b38cf4c9bd3855a2216c94
| 59,609
|
def get_period_overlap(request_start, request_end, avail_start, avail_end):
    """Find the overlap between the requested time range and the available
    period.

    Parameters
    ----------
    request_start: datetime-like
        Start of the period of requested data.
    request_end: datetime-like
        End of the period of requested data.
    avail_start: datetime-like
        Start of the available data.
    avail_end: datetime-like
        End of available data.

    Returns
    -------
    list of datetime or None
        [start, end] of the overlapping period, or None when the two
        periods do not overlap at all.
    """
    # No overlap unless each range starts before the other ends.
    if request_start >= avail_end or request_end <= avail_start:
        return None
    # The overlap is bounded by the later start and the earlier end.
    return [max(request_start, avail_start), min(request_end, avail_end)]
|
a7967b3c9b92f06829cf22905e9acd7f64274e2f
| 59,611
|
def _compute_spect_slope_feat_names(data, **kwargs):
"""Utility function to create feature names compatible with the output of
:func:`mne_features.univariate.compute_energy_freq_bands`."""
n_channels = data.shape[0]
stats = ['intercept', 'slope', 'MSE', 'R2']
return ['ch%s_%s' % (ch, stat) for ch in range(n_channels)
for stat in stats]
|
450abecab0c5287c1a90507c8e65a507878ad2d9
| 59,617
|
def estimate_gk(y, theta, delta, ck):
    """Estimate the SPSA gradient approximation g_k(theta_k).

    Evaluates *y* at two simultaneous perturbations of *theta* (one step of
    size *ck* along a random direction from *delta* and one against it) and
    forms the central-difference estimate component-wise.
    """
    # Random perturbation direction for this iteration.
    perturbation = delta()
    # Perturb theta in both directions along the perturbation vector.
    theta_plus = [t + ck * d for t, d in zip(theta, perturbation)]
    theta_minus = [t - ck * d for t, d in zip(theta, perturbation)]
    y_plus, y_minus = y(theta_plus), y(theta_minus)
    # Central difference per component.
    return [(y_plus - y_minus) / (2 * ck * d) for d in perturbation]
|
942c2a9013c67ff6c1ebd692ce05a3f8634b0b27
| 59,619
|
import random
def say_hi(user_mention):
    """Greet a user, substituting their mention into a random template."""
    templates = ['Sup, {mention}...',
                 'Yo!',
                 'Hola {mention}',
                 'Bonjour!']
    chosen = random.choice(templates)
    return chosen.format(mention=user_mention)
|
5c455920bdf56a9ce9445f652d4a618bfc5c8a7a
| 59,621
|
import torch
def get_default_dropout(p=0., dim=2, inplace=True):
    """Wrapper for torch's dropout.

    Returns Dropout / Dropout2d / Dropout3d for dim 1 / 2 / 3 respectively;
    raises TypeError for any other dim.
    """
    dropout_by_dim = {
        1: torch.nn.Dropout,
        2: torch.nn.Dropout2d,
        3: torch.nn.Dropout3d,
    }
    if dim not in dropout_by_dim:
        raise TypeError("Expected dim = 1, 2 or 3, but got {}".format(dim))
    return dropout_by_dim[dim](p=p, inplace=inplace)
|
9ca838d5d431666bbee63d2e309faacd8c013736
| 59,622
|
def add(value1, value2):
    """Return the sum (or concatenation) of ``value1`` and ``value2``."""
    result = value1 + value2
    return result
|
fb2c141c486c1c956825f6eb10500607137971d2
| 59,623
|
import torch
def int_to_device(device: int) -> torch.device:
    """
    Return a torch.device for the given device index.

    An existing torch.device passes through unchanged; a negative index maps
    to the CPU; anything else is handed to the torch.device constructor.
    """
    if isinstance(device, torch.device):
        return device
    return torch.device("cpu") if device < 0 else torch.device(device)
|
f61e0b658cf9190787c76a8137938fc0d4db2fa4
| 59,625
|
import socket
def mk_socket(server=False):
    """Make a TCP socket with Nagle's algorithm disabled.

    With ``server=True``, additionally allow quick address reuse
    (SO_REUSEADDR) so restarts don't hit TIME_WAIT.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if server:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    return sock
|
ed7d15abe6b01b444f0d04ed4b76a41fdd262204
| 59,627
|
def get_zscores(returns):
    """
    Compute the Z-scores of the input returns.

    Parameters
    ----------
    returns : Series or DataFrame, required
        Series or DataFrame of returns

    Returns
    -------
    Series or DataFrame
        Z-scores; exact-zero returns are treated as missing so they do not
        skew the mean/std.
    """
    masked = returns.where(returns != 0)
    mean = masked.mean()
    std = masked.std()
    return (masked - mean) / std
|
aa258a23b75bd67c0e9cf147d78473238990fafd
| 59,630
|
import math
def round_sigfigs(num, sig_figs):
    """Round *num* to the specified number of significant figures.

    >>> round_sigfigs(0, sig_figs=4)
    0
    >>> int(round_sigfigs(12345, sig_figs=2))
    12000
    >>> int(round_sigfigs(-12345, sig_figs=2))
    -12000
    >>> int(round_sigfigs(1, sig_figs=2))
    1
    >>> '{0:.3}'.format(round_sigfigs(3.1415, sig_figs=2))
    '3.1'
    >>> '{0:.3}'.format(round_sigfigs(-3.1415, sig_figs=2))
    '-3.1'
    >>> '{0:.5}'.format(round_sigfigs(0.00098765, sig_figs=2))
    '0.00099'
    >>> '{0:.6}'.format(round_sigfigs(0.00098765, sig_figs=3))
    '0.000988'
    """
    if num == 0:
        return 0  # log10 is undefined at zero
    # Decimal exponent of the leading significant digit.
    exponent = math.floor(math.log10(abs(num)))
    return round(num, sig_figs - 1 - exponent)
|
7aa28f0102ac7c32a8cd66e55cef99318b53aa9a
| 59,633
|
def beta_pruning_actions(state):
    """ Return the actions available from a state; the state space is a
    binary tree, so each internal node offers exactly two actions.

    :param state: The current state
    :return: The available actions given the current state (empty for leaves)
    """
    transitions = {
        "a": ["a1", "a2"],
        "b": ["b1", "b2"],
        "c": ["c1", "c2"],
        "d": ["d1", "d2"],
        "e": ["e1", "e2"],
        "f": ["f1", "f2"],
        "g": ["g1", "g2"],
    }
    # States beyond 'g' are leaves and therefore offer no actions.
    return transitions.get(state, [])
|
e9865c0e1ade4d124bb27d491977e570e6df76ed
| 59,638
|
def find_reaction_set_index(i, species_reaction_sets):
    """Return the index of the reaction set in *species_reaction_sets* that
    contains the i-th reaction (1-based, counted across all sets in order).

    :param i: index of a reaction in a reaction set from species_reaction_sets
    :param species_reaction_sets: list of species-specific CoMetGeNe reaction
        sets
    :return: index in species_reaction_sets for the reaction
    """
    seen = 0
    set_index = 0
    # Walk forward accumulating reaction counts until we've covered i.
    while seen < i:
        seen += len(species_reaction_sets[set_index].reactions)
        set_index += 1
    # The loop overshoots by one set.
    return set_index - 1
|
fecd7057d6b698870a550a9db9e0b1b697d06b25
| 59,639
|
import re
def get_num_from_str(string):
    """
    Given a snippet-descriptor string, return its two integer indexes.

    in: '[1, 23] <RE>'
    out: [1, 23]

    :param string: A string defining a snippet.
    :return: list of the two integers of that snippet.
    """
    # Match the '[<int>, <int>]' index portion with a raw-string pattern and
    # parse the digits directly — no eval() on matched text.
    matches = re.findall(r"\[(\d+), (\d+)\]", string)
    first, second = matches[0]  # IndexError when no index portion is present
    return [int(first), int(second)]
|
0dacc5ba3149162bef651aaef76f99f29cc94ecf
| 59,644
|
def split_sequence(seq, n):
    """Split a sequence into consecutive tokens of length n.

    The last token may be shorter than n; an empty sequence yields []."""
    chunks = []
    remaining = seq
    while remaining:
        chunks.append(remaining[:n])
        remaining = remaining[n:]
    return chunks
|
db33de052bd1c8349ad4e6c0eb8a9acd973bf9e0
| 59,651
|
def overlapping_carts(carts):
    """ Return True if any of the CARTS occupy the same location
    (each cart is a tuple whose first element is its location). """
    positions = [cart[0] for cart in carts]
    # Duplicates collapse in a set, shrinking it below the list length.
    return len(set(positions)) != len(positions)
|
3549936809c30e3468d4c35b15984aa5667197de
| 59,656
|
def _left(i):
"""Gets index of the left child of a node."""
return (i << 1) + 1
|
498fae955e349a81a06f282d7e56801058333c98
| 59,658
|
def reformat_slice(
        sl_in: slice,
        limit_in: int,
        mirror: bool) -> slice:
    """
    Reformat the slice, with optional reverse operation.

    Note that the mirror operation doesn't run the slice backwards across the
    same elements, but rather creates a mirror image of the slice. This is
    to properly accommodate the data segment reverse symmetry transform.

    Parameters
    ----------
    sl_in : slice
        From prior processing, it is expected that `sl_in.step` is populated,
        and `sl_in.start` is non-negative, and `sl_in.stop` is non-negative or
        `None` (only in the event that `sl_in.step < 0`).
    limit_in : int
        The upper limit for the axis to which this slice pertains.
    mirror : bool
        Create the mirror image slice?

    Returns
    -------
    slice
    """
    # Validate the preconditions stated in the docstring.
    if sl_in.step is None:
        raise ValueError('input slice has unpopulated step value')

    if sl_in.start is not None and sl_in.start < 0:
        raise ValueError('input slice has negative start value')

    if sl_in.stop is not None and sl_in.stop < 0:
        raise ValueError('input slice has negative stop value')

    if mirror:
        # make the mirror image of the slice, the step maintains the same sign,
        # and will be reversed by the format function
        if sl_in.step > 0:
            # Forward slice: default bounds are [0, limit_in).
            start_in = 0 if sl_in.start is None else sl_in.start
            stop_in = limit_in if sl_in.stop is None else sl_in.stop
            if sl_in.step > (stop_in - start_in):
                # Step exceeds the span: clamp so at least one stride fits.
                step_in = stop_in - start_in
            else:
                step_in = sl_in.step
            # what is the last included location?
            # NOTE(review): `count` rounds toward zero; presumably intended as
            # the number of whole strides inside [start_in, stop_in) — confirm
            # the boundary case where the span is an exact multiple of step.
            count = int((stop_in - start_in)/float(step_in))
            final_location = start_in + count*step_in
            # Reflect [start, final] about the axis end.
            return slice(limit_in - final_location, limit_in - start_in, step_in)
        else:
            # Backward slice: default bounds run from the last index down to
            # (virtual) -1, i.e. inclusive of index 0.
            start_in = limit_in - 1 if sl_in.start is None else sl_in.start
            stop_in = -1 if sl_in.stop is None else sl_in.stop
            # NOTE(review): both step and (stop_in - start_in) are negative
            # here; this comparison clamps when |step| exceeds the span,
            # mirroring the forward branch — confirm the intended direction
            # of the inequality.
            if sl_in.step < (stop_in - start_in):
                step_in = stop_in - start_in
            else:
                step_in = sl_in.step
            count = int((stop_in - start_in) / float(step_in))
            final_location = start_in + count*step_in
            return slice(limit_in - final_location, limit_in - start_in, step_in)
    else:
        # No mirroring requested: pass the slice through unchanged.
        return sl_in
|
31f9d0b301ef9b1b106ac61f2d0e7f980abed5ce
| 59,660
|
def is_valid_interval(interval):
    """Checks if the given interval is valid. A valid interval
    is always a positive, non-zero integer value.
    """
    if not isinstance(interval, int):
        return False
    # BUG FIX: zero previously slipped through (`interval < 0` only rejected
    # negatives) even though the contract says "positive, non-zero".
    return interval > 0
|
18c41942e6b72251d618653516232eb74c8b5097
| 59,661
|
def svg_rect_to_path(x, y, width, height, rx=None, ry=None):
    """Convert rect SVG element to path
    https://www.w3.org/TR/SVG/shapes.html#RectElement
    """
    # Per the SVG spec, a missing corner radius defaults to the other one,
    # and both default to 0 when neither is given.
    if rx is None and ry is None:
        rx = ry = 0
    elif ry is None:
        ry = rx
    elif rx is None:
        rx = ry
    rounded = rx > 0 and ry > 0
    path = [f"M{x + rx:g},{y:g}", f"H{x + width - rx:g}"]
    if rounded:
        path.append(f"A{rx:g},{ry:g},0,0,1,{x + width:g},{y + ry:g}")
    path.append(f"V{y + height - ry:g}")
    if rounded:
        path.append(f"A{rx:g},{ry:g},0,0,1,{x + width - rx:g},{y + height:g}")
    path.append(f"H{x + rx:g}")
    if rounded:
        path.append(f"A{rx:g},{ry:g},0,0,1,{x:g},{y + height - ry:g}")
    path.append(f"V{y + ry:g}")
    if rounded:
        path.append(f"A{rx:g},{ry:g},0,0,1,{x + rx:g},{y:g}")
    path.append("z")
    return " ".join(path)
|
b1c5f5dea08d2d57634812e571f3c6e97331c050
| 59,665
|
def parse_bool(value):
    """Interpret *value* as a boolean flag; None when unrecognized.

    >>> parse_bool(1)
    True
    >>> parse_bool('off')
    False
    >>> parse_bool('foo')
    """
    normalized = str(value).lower()
    if normalized in ('on', 'true', 'yes', '1'):
        return True
    if normalized in ('off', 'false', 'no', '0'):
        return False
    # Unrecognized values fall through and implicitly return None.
|
46bd01b457be9aa6f26dd226ce3913a48cf5307d
| 59,666
|
import collections
def counter_convert(sent_tags, counter, zero_one=False):
    """Covert counter to 1, 2, and more categories

    If zero_one is False, we divide the dataset into 6 categories:
        Train Test    Evaluation-Point
        0     0       None
        0     1       (0-1)
        0     >1      (0-2) Zero-shot, performance on unseen entity
        1     0       None
        1     1       (1-1)
        1     >1      (1-2) One-shot, performance on low frequency entity
        >1    0       None
        >1    1       (2-1)
        >1    >1      (2-2) Training on normal data
    If zero_one is True, we divide the dataset into 3 categories, in zero shot, one shot level:
        Train Test    Evaluation-Point
        0     1 + >1  Zero-shot, performance on unseen entity
        1     1 + >1  One-shot, performance on low frequency entity
        >1    1 + >1  Training on normal data
    """
    # convert fre-name to name-fre counter
    # `counter` maps a frequency (as a string key, e.g. '1', '2') to the list
    # of entity names with that total frequency.
    name_counter = {}
    for frequency, name_list in counter.items():
        for name in name_list:
            name_counter[name] = int(frequency)
    # convert to training set counter: {'パーク建設': 3, '経産広告社': 1, ...}
    # i.e. how often each entity text occurs in the training sentences.
    sent_counter = collections.defaultdict(int)
    for sent in sent_tags:
        for tag in sent:
            sent_counter[tag['text']] += 1
    if not zero_one:
        result = {'0-1': set(), '0-2': set(), '1-1': set(),
                  '1-2':set(), '2-1': set(), '2-2': set()}
        # (0, xxx)
        # Entities never seen in training; bucket by their total frequency.
        for name, count in name_counter.items():
            if name not in sent_counter:
                if name in counter['1']:  # (0, 1)
                    result['0-1'].add(name)
                else:  # (0, 2)
                    result['0-2'].add(name)
        for name, count in sent_counter.items():
            # (1, xxx)
            # Seen exactly once in training; remaining occurrences are test.
            if count == 1:
                if name in counter['1']:  # (1, 0)
                    continue
                elif name in counter['2']:  # (1, 1):
                    result['1-1'].add(name)
                else:  # (1, 2)
                    result['1-2'].add(name)
            # (2, xxx)
            # Seen more than once in training; test count is total - train.
            if count > 1:
                if name in name_counter:
                    if count == name_counter[name]:  # (2, 0)
                        continue
                    elif name_counter[name] - count == 1:  # (2, 1)
                        result['2-1'].add(name)
                    else:  # (2, 2)
                        result['2-2'].add(name)
    else:  # zero_one is True
        # Collapse to three buckets keyed only by the training-set count.
        result = {'0': set(), '1': set(), '2': set()}
        for name in name_counter.keys():
            if name not in sent_counter:
                result['0'].add(name)
            elif name in sent_counter and sent_counter[name] == 1:
                result['1'].add(name)
            else:
                result['2'].add(name)
    return result
|
13ff08d36ed90573ece6ff942df898d2581812ab
| 59,668
|
def transform_point(point, matrix):
    """Apply a 2-D affine transform to a point.

    :param list point: 2-item list [x, y]
    :param list matrix: 6-item list [a, b, c, d, e, f] representing the
        transformation matrix (homogeneous coordinates with the constant
        row omitted)
    :returns list: 2-item transformed point
    """
    px, py = point
    a, b, c, d, e, f = matrix
    return [px * a + py * c + e,
            px * b + py * d + f]
|
92c16f1698db3e9b2d754aefb0b44a8bef2cf783
| 59,670
|
def bracket_monotonic(f, x0=0.0, x1=1.0, factor=2.0):
    """Return `(x0, x1)` where `f(x0)*f(x1) < 0`.
    Assumes that `f` is monotonic and that the root exists.
    Proceeds by increasing the size of the interval by `factor` in the
    direction of the root until the root is found.
    Examples
    --------
    >>> import math
    >>> bracket_monotonic(lambda x:10 - math.exp(x))
    (0.0, 3.0)
    >>> bracket_monotonic(lambda x:10 - math.exp(-x), factor=1.5)
    (4.75, -10.875)
    """
    assert abs(x1 - x0) > 0
    assert factor > 1.0
    lo, hi = x0, x1
    f_lo, f_hi = f(lo), f(hi)
    # Orient the pair so that f is (weakly) increasing from lo to hi.
    if f_hi < f_lo:
        lo, hi = hi, lo
        f_lo, f_hi = f_hi, f_lo
    # Grow the interval geometrically past `lo` until the values straddle 0.
    while f_lo * f_hi >= 0:
        lo, hi = hi, lo - factor * (hi - lo)
        f_lo, f_hi = f_hi, f(hi)
    return (lo, hi)
|
fec0382fba3859538aed05e09e004c1be0f9ca21
| 59,673
|
def strip_keys(src_dict, keys_to_strip):
    """Return a copy of *src_dict* without the entries named in *keys_to_strip*."""
    filtered = {}
    for key, value in src_dict.items():
        if key in keys_to_strip:
            continue
        filtered[key] = value
    return filtered
|
47e63a6ce766334d25c8ee0a9ba68a82f0dfc3ae
| 59,674
|
import io
import yaml
def load_yaml_file(file):
    """
    Load data from yaml file
    :param file: Readable object or path to file
    :type file: FileIO | str | unicode
    :return: Yaml data
    :rtype: None | int | float | str | unicode | list | dict
    """
    # Anything with a read() method is treated as an already-open stream.
    if hasattr(file, "read"):
        return yaml.load(file, yaml.FullLoader)
    with io.open(file, "r", encoding="utf-8") as handle:
        return yaml.load(handle, yaml.FullLoader)
|
ab753e524278b6160fe9d8009925966d34430130
| 59,679
|
def replace_one(opstr: str, old: str, new: str) -> str:
    """Replace text ensuring that exactly one occurrence is replaced.

    :param opstr: String to operate on.
    :param old: Substring that must occur exactly once in *opstr*.
    :param new: Replacement text.
    :returns: *opstr* with the single occurrence of *old* replaced by *new*.
    :raises ValueError: If *old* occurs zero times or more than once.
    """
    count = opstr.count(old)
    if count != 1:
        # ValueError (an Exception subclass) is the conventional type for a
        # bad argument value; existing broad `except Exception` handlers
        # still catch it.
        raise ValueError(
            f'expected 1 string occurrence; found {count}. String = {old}')
    return opstr.replace(old, new)
|
1d957ead574af78b371de59e3946e5801ce77b1f
| 59,681
|
import math
def rangle_to_cos_and_sin(angle: float):
    """
    AUTHORS:
    --------
    :author: Alix Leroy
    DESCRIPTION:
    ------------
    Convert an angle in radians to its corresponding cos and sin values
    PARAMETERS:
    -----------
    :param angle(float): The angle in radians to convert
    RETURN:
    -------
    :return (Tuple[float, float]): Cos and sin values
    :return: None
    """
    cos_value = math.cos(angle)
    sin_value = math.sin(angle)
    # Trailing None mirrors the project's (value, flag) return convention.
    return (cos_value, sin_value), None
|
1570692ce34ec796c0a42bf8539c3d70a4a9dc5b
| 59,683
|
import string
def normalize(token_list):
    """Lowercase tokens after stripping punctuation and digit characters.

    Tokens that become empty after stripping are dropped entirely.
    """
    # One translation table deletes every punctuation and digit character.
    strip_table = str.maketrans('', '', string.punctuation + string.digits)
    cleaned = (token.translate(strip_table) for token in token_list)
    return [token.lower() for token in cleaned if token]
|
e000456b57be0fd44e6c03c0162b39165fc48aa5
| 59,685
|
import math
def compute_sharpe_ratio(daily_rets, rf=0, freq='daily'):
    """Annualized Sharpe ratio of a return series.

        SR = K * mean(Rp - Rf) / std(Rp)

    where K is the square root of the number of samples per year
    (252 daily, 52 weekly, 12 monthly, 1 yearly).

    The risk-free rate ``rf`` is an annual rate converted to a per-period
    rate via the traditional shortcut: the freq-th root of (1 + rf), minus 1.
    Common choices for rf: LIBOR (London Interbank Offer Rate), the 3-month
    Treasury-bill rate, or simply 0%.

    :param daily_rets: return series supporting ``.mean()`` and ``.std()``
        (e.g. a pandas Series or numpy array)
    :param rf: annual risk-free rate (default 0)
    :param freq: one of 'daily', 'weekly', 'monthly', 'yearly'
    :return: annualized Sharpe ratio
    """
    samples_per_year = {'daily': 252, 'weekly': 52,
                        'monthly': 12, 'yearly': 1}[freq]
    annualization = math.sqrt(samples_per_year)
    period_rf = math.pow(1 + rf, 1 / samples_per_year) - 1
    # Subtracting the constant rf does not change the std, so std(Rp) is used.
    excess_mean = (daily_rets - period_rf).mean()
    return annualization * excess_mean / daily_rets.std()
|
8d1b46042f8ee969ba359a98893412232562c555
| 59,689
|
def pad(s1, blocksize=16):
    """
    Pad a string with NUL characters to a multiple of *blocksize*.

    If the string is already a multiple of blocksize, it is returned
    unchanged.  The block size was previously hard-coded to 16; it is now
    a parameter with the same default, so existing callers are unaffected.

    :param s1: string to pad
    :param blocksize: target block size in characters (default 16)
    :return: *s1* padded with "\\x00" up to the next blocksize boundary
    """
    remainder = len(s1) % blocksize
    if remainder == 0:
        return s1
    return s1 + "\x00" * (blocksize - remainder)
|
3bc2f7cd228df88ef9e07d062620e17fbd1685c9
| 59,691
|
def apply_nt_offset(book_id):
    """
    Offsets the book id to the new testament equivalent.
    e.g. 01 becomes 41 (Matthew)
    :param book_id: zero-padded book id string (or int-convertible value)
    :return: shifted id as a zero-padded string of at least two digits
    """
    shifted = int(book_id) + 40
    return str(shifted).zfill(2)
|
a3a5e7b68039339ae04becda28ea82a23dfadc20
| 59,694
|
def isShrine(info, iReligion):
    """
    Returns True if <info> is the Shrine for <iReligion>.
    """
    # NOTE(review): iReligion is unused — this only tests whether <info> is
    # *a* shrine at all (shrines carry a global religion commerce value, so
    # the sentinel -1 means "not a shrine").  Presumably callers filter by
    # religion elsewhere; confirm the parameter is intentionally ignored.
    return info.getGlobalReligionCommerce() != -1
|
b8de78f61c06502ecd64bb79e8db3acd004efc6b
| 59,696
|
def normalise_constant(name):
    """
    Assume the given name is a PCRE_* constant and normalise it.

    Uppercases the name and prepends 'PCRE_' when it is missing.
    """
    upper = name.upper()
    return upper if upper.startswith('PCRE_') else 'PCRE_' + upper
|
e66ac85ecd89ed8ebde688298f1eb45e63e49a06
| 59,697
|
def dBtoLinear(db):
    """Converts a log value to a linear value."""
    # Amplitude convention: 20 dB per decade.
    return 10 ** (db / 20)
|
120dd8b13cd4eb56de55cba86baa79bc1784cc2d
| 59,699
|
def _transform_type(t):
    """
    Convert a type from a schema-def to a MySQL type
    :param t: Type in schemadef
    :return: MySQL equivalent
    """
    # Bug fix: the original wrote `t.lower == "bool"` (missing call
    # parentheses), comparing the bound method object to a string, so the
    # "bool" spelling was never matched.
    if t.lower() in ("boolean", "bool"):
        t = "tinyint(1)"
    return t
|
b8228d3f464f33bcce1868e946879bf31017cde5
| 59,702
|
def get_dims(plot_objs):
    """
    Gets the appropriate dimensions for the amount of objects going to be plotted.
    Adds columns before rows.
    """
    needed = len(plot_objs)
    rows = cols = 1
    # Grow the grid one axis at a time (column first) until it fits.
    while rows * cols < needed:
        if cols == rows:
            cols += 1
        else:
            rows += 1
    return (rows, cols)
|
d76494038e531769c7c1813b07e7d809a265a3f7
| 59,704
|
def copyoption(v):
    """Deep-copy dicts, lists, tuples and sets; keep leaves by reference.

    Unlike copy.deepcopy, only the container types typically used in
    configuration blueprints are duplicated; every other object is
    preserved by reference.
    """
    if isinstance(v, dict):
        copied_items = ((key, copyoption(val)) for key, val in v.items())
        return v.__class__(copied_items)
    if isinstance(v, (list, set, tuple)):
        return v.__class__(map(copyoption, v))
    # Non-container leaf: share the original object.
    return v
|
6e541ee829081c3fc1ba6d5155595ab9d30a14ce
| 59,705
|
def JsonString_to_JsonFile(json_string, file_name="test.json"):
    """Transforms a json string to a json file

    :param json_string: JSON text written out verbatim.
    :param file_name: Destination path (default "test.json").
    :return: The path the string was written to.
    """
    # `with` guarantees the handle is closed even if the write raises;
    # the original leaked the handle on failure.
    with open(file_name, 'w') as out_file:
        out_file.write(json_string)
    return file_name
|
ba916b585009bc348b5103e18c1025df8076c925
| 59,714
|
def __sbox_single_byte(byte, sbox):
    """S-Box substitution of a single byte"""
    # High nibble selects the row, low nibble the column.
    row, col = divmod(byte, 16)
    return sbox[row][col]
|
3091bf7da9d6365713f11f8659c56d0c815965ec
| 59,717
|
def dut(request):
    """Fixture for --dut."""
    # pytest fixture body: exposes the value of the --dut command-line
    # option (registered elsewhere via pytest_addoption) to tests.
    return request.config.getoption("--dut")
|
c909827e09b2de6ac5e51c602ec388853cb44f8f
| 59,719
|
import torch
def get_kth_fold_data(X_train, y_train, k, iteration):
    """ Function used to extract the datasets for the k-th iteration of the k-fold cross validation algorithm """
    # each fold holds floor(N / k) examples; any remainder stays in training
    fold_size = X_train.shape[0] // k
    if iteration == 0:
        # first fold: validation is the leading slice
        X_val, y_val = X_train[:fold_size], y_train[:fold_size]
        _X_train, _y_train = X_train[fold_size:], y_train[fold_size:]
    elif iteration < k - 1:
        # middle folds: validation is an interior slice; training is the
        # concatenation of the pieces on either side of it
        lo = iteration * fold_size
        hi = (iteration + 1) * fold_size
        X_val, y_val = X_train[lo:hi], y_train[lo:hi]
        _X_train = torch.cat((X_train[:lo], X_train[hi:]), 0)
        _y_train = torch.cat((y_train[:lo], y_train[hi:]), 0)
    else:
        # last fold: validation is the trailing fold_size examples
        X_val, y_val = X_train[-fold_size:], y_train[-fold_size:]
        _X_train, _y_train = X_train[:-fold_size], y_train[:-fold_size]
    return _X_train, _y_train, X_val, y_val
|
0d954edff01ec09abfed77251a39fa6ca0cebc5e
| 59,721
|
def _get_return_value_type_name_from_line(line_str: str) -> str:
    """
    Get the type name of return value from the target line string.

    Parameters
    ----------
    line_str : str
        Target line string, e.g. 'price : int' or just 'int'.

    Returns
    ----------
    return_value_type_name : str
        Type name of return value.
    """
    parts = line_str.split(':')
    # 'name : type' lines carry the type after the colon; a line without a
    # colon is the type name by itself.
    type_name = parts[1] if ':' in line_str else parts[0]
    return type_name.strip()
|
1327647128176e9168509d660769634da088c3a1
| 59,723
|
def format_monitoring_metric_per_metric_type(input, proper_timestamp):
    """Convert a Translator monitoring metric into InfluxDB insert format.

    The metric name becomes the InfluxDB measurement name (e.g. cpu, memory)
    while its value, type and unit are stored as fields.  The source origin
    (see `tag`) and the MANO identifiers become tags for searching.
    See the format of the monitoring metric coming from the Translator
    component: samples/input.json

    Args:
        input (dict): The metric as it was set by the translator component
        proper_timestamp (str): The timestamp of the metric as provided from NVFI or app

    Returns:
        list: The monitoring metric in the format the InfluxDB requires for insertion.
    """
    mano = input.get('mano', {})
    vim = mano.get('vim', {})
    ns = mano.get('ns', {})
    vnf = mano.get('vnf', {})
    vdu = mano.get('vdu', {})
    metric = input.get('metric', {})
    flavor = vdu.get('flavor', {})
    tags = {
        "vim_uuid": vim.get('uuid'),
        "vim_type": vim.get('type'),
        "vim_name": vim.get('name'),
        "vim_host": vim.get('url', "http://localhost"),
        "origin": vim.get('tag', ""),
        "ns_uuid": ns.get('id', None),
        "ns_name": ns.get('nsd_name', None),
        "nsd_id": ns.get('nsd_id', None),
        "nsd_name": ns.get('nsd_name', None),
        "vnf_uuid": vnf.get('id', None),
        "vnf_name": vnf.get('name', None),
        "vnf_short_name": vnf.get('short_name', None),
        "vnfd_id": vnf.get('vnfd_id', None),
        "vnfd_name": vnf.get('vnfd_name', None),
        "vdu_uuid": vdu.get('id', None),
        "vdu_image_uuid": vdu.get('image_id', None),
        "vdu_flavor_vcpus": flavor.get('vcpus', None),
        "vdu_flavor_ram": flavor.get('ram', None),
        "vdu_flavor_disk": flavor.get('disk', None),
        "vdu_state": vdu.get('status', None),  # new
        "ip_address": vdu.get('ip_address', None),  # new
        # "mgmt-interface": vdu.get('mgmt-interface', None),  # new
    }
    fields = {
        "value": metric.get('value', None),
        "unit": metric.get('unit', None),
        "type": metric.get('type', None),
    }
    return [{
        "measurement": metric.get('name'),
        "tags": tags,
        "time": proper_timestamp,
        "fields": fields,
    }]
|
04e4dad181de4a7a9c2a968c463e45686e1cf7af
| 59,724
|
def seconds_to_seconds(seconds):
    """
    Converts `seconds` to seconds as a :class:`float`.
    """
    # Identity conversion kept for API symmetry with sibling *_to_seconds
    # helpers; only coerces the value to float.
    return float(seconds)
|
d50fea82dabd9825a4e746eb1f255e59e4011820
| 59,725
|
import base64
import json
def retrieve_jwt_expiration_timestamp(jwt_value):
    # type: (str) -> int
    """
    Retrieves the expiration value from the JWT.

    :param str jwt_value: The JWT value.
    :returns: int
    :raises ValueError: on an empty value, malformed token, undecodable
        payload, or a payload without an ``exp`` claim.
    """
    if not jwt_value:
        raise ValueError("jwt_value must be a non-empty string.")
    parts = jwt_value.split(".")
    if len(parts) < 3:
        raise ValueError("Invalid JWT structure. Expected a JWS Compact Serialization formatted value.")
    try:
        # JWT payloads use the base64url alphabet (RFC 7515) and prefer no
        # padding (https://tools.ietf.org/id/draft-jones-json-web-token-02.html#base64urlnotes).
        # Bug fix: decode with urlsafe_b64decode so '-' and '_' are handled
        # correctly — plain b64decode silently discards those characters,
        # corrupting the payload.  We pad with the max padding of === to keep
        # the logic simple; the decoder trims the excess appropriately.
        padded_base64_payload = base64.urlsafe_b64decode(parts[1] + "===").decode('utf-8')
        payload = json.loads(padded_base64_payload)
    except ValueError:
        raise ValueError("Unable to decode the JWT.")
    try:
        exp = payload['exp']
    except KeyError:
        raise ValueError("Invalid JWT payload structure. No expiration.")
    return int(exp)
|
d7bb2782a28ee89c50bb7a064aab35e3b9188aa0
| 59,726
|
def get_headers(req):
    """
    Pull the user and tenant identifiers out of the request headers.

    :param req: request object exposing a ``headers`` mapping
    :return: tuple of (user id, tenant id); either is None when the
        corresponding header is absent
    """
    headers = req.headers
    return headers.get('X-User-ID', None), headers.get('X-Tenant-ID', None)
|
5d96edb01be74e0bf742dc3e5cfb3f0431708bc1
| 59,733
|
def dictadd(dict_a, dict_b):
    """
    Returns a dictionary consisting of the keys in `a` and `b`.
    If they share a key, the value from b is used.
    """
    # Later unpacking wins, so b's values override a's on shared keys.
    return {**dict_a, **dict_b}
|
36fc2d6dc3680472720f99354b0739bf3d73a435
| 59,736
|
def extract_subelements(vector):
    """ Flatten one level of nesting into a 1D list.

    For example, [[1, 2, 3], [1]] becomes [1, 2, 3, 1].

    Args:
        vector (list of list): Original list of list

    Returns:
        extracted (list): Return 1D list
    """
    return [value for elem in vector for value in elem]
|
b647b91dc4c87048015216ec43798099c399bfa2
| 59,744
|
from typing import Iterable
from typing import List
def _combine_ranges(ranges: Iterable[range]) -> List[range]:
    """
    Combine list of ranges to the most compact form.
    Simple ranges with step 1 can always be combined when their
    endpoints meet or overlap:
    >>> _combine_ranges([range(7, 10), range(1, 4),
    ...                  range(2, 6), range(3, 5)])
    [range(1, 6), range(7, 10)]
    >>> _combine_ranges([range(2, 7), range(3, 5)])
    [range(2, 7)]
    >>> _combine_ranges([range(1, 3), range(3, 5)])
    [range(1, 5)]
    >>> _combine_ranges([range(1, 3), range(4, 5)])
    [range(1, 3), range(4, 5)]
    >>> _combine_ranges([range(1, 2), range(2, 3), range(3, 4)])
    [range(1, 4)]
    If step is larger than 1, then combining can only occur when the
    ranges are in same "phase":
    >>> _combine_ranges([range(1, 8, 3), range(10, 14, 3)])
    [range(1, 14, 3)]
    >>> _combine_ranges([range(1, 8, 3), range(9, 14, 3)])
    [range(1, 8, 3), range(9, 14, 3)]
    Ranges with different step are not combined:
    >>> _combine_ranges([range(1, 8, 3), range(10, 14, 3),
    ...                  range(1, 20, 2)])
    [range(1, 20, 2), range(1, 14, 3)]
    Except if they have only a single item:
    >>> _combine_ranges([range(1, 3), range(3, 4, 3), range(4, 6)])
    [range(1, 6)]
    Empty ranges are removed:
    >>> _combine_ranges([range(1, 1), range(3, 4), range(6, 6)])
    [range(3, 4)]
    Empty input results in empty output:
    >>> _combine_ranges([])
    []
    """
    # Replace single item ranges with a range with step=1, remove
    # empty ranges and sort by step and start.  Sorting groups ranges of
    # equal step together and orders them so a single left-to-right merge
    # pass below is sufficient.
    processed_ranges = sorted((
        x if len(x) != 1 else range(x.start, x.start + 1, 1)
        for x in ranges if x), key=(lambda x: (x.step, x.start, x.stop)))
    if not processed_ranges:
        # Nothing left after dropping the empty ranges.
        return []
    result: List[range] = []
    # `last` is the merge candidate being grown; flushed to `result`
    # whenever the next range cannot be combined with it.
    last: range = processed_ranges[0]
    for cur in processed_ranges[1:]:
        # Enlarge the last range as long as the cur.start
        # is within it or in the edge (= within last_enlarded)
        last_enlarged = range(last.start, last.stop + last.step, last.step)
        if cur.step == last.step and cur.start in last_enlarged:
            # Same step and same phase: extend `last` to cover `cur`.
            last = range(last.start, max(last.stop, cur.stop), last.step)
        else:
            if last:
                result.append(last)
            last = cur
    if last:
        # Flush the final pending candidate.
        result.append(last)
    return result
|
1d8fcf2e64b86203f35ae1cdf318676a0888ed01
| 59,752
|
def isanyinstance(o, classes):
    """True if *o* is an instance of any class in *classes*.

    :param o: object to test
    :param classes: iterable of classes
    :return: bool
    """
    # isinstance natively accepts a tuple of types, which replaces the
    # original hand-rolled loop; an empty iterable still yields False.
    return isinstance(o, tuple(classes))
|
3a8a006e92e7e343d8cad429c798ca31e103143a
| 59,761
|
def _get_return_value_name_from_line(line_str: str) -> str:
    """
    Get the return value name from the target line string.

    Parameters
    ----------
    line_str : str
        Target line string. e.g., 'price : int'

    Returns
    -------
    return_value_name : str
        Return value name. If colon character not exists in
        line string, a blank string will be set.
    """
    if ':' not in line_str:
        return ''
    # The name is whatever precedes the first colon.
    return line_str.split(':')[0].strip()
|
463f8caeb60b57703e15d5720013533d2c0c04f1
| 59,763
|
def modify_last(txt: str, n: int) -> str:
    """String with last character repeated n times."""
    head, tail = txt[:-1], txt[-1]
    return head + tail * n
|
ae28cf82a42748ad4956aac223865dc7ead55e25
| 59,766
|
def imageFile_to_number(filename):
    """Extract the digits of the ``n`` field from an image file name.

    e.g. 'ws0.7_crowding1_n448_Ndisk57.png' --> '448'

    Bug fix: the original sliced with a hard-coded start of 18, which is off
    by one for the documented example (the prefix 'ws0.7_crowding1_n' is 17
    characters, so it returned '48').  Parse the '_n<digits>_Ndisk' field
    explicitly instead.

    :param filename: file name containing a '_n<digits>_Ndisk' field
    :return: the digits following '_n' as a string, or '' when absent
    """
    import re
    match = re.search(r'_n(\d+)_Ndisk', filename)
    if match is None:
        return ''
    return match.group(1)
|
16b6e94b3e2dd926cafa811c2eac13e33fe484e7
| 59,768
|
def crop_array(top, left, bottom, right, array):
    """Return a copied subarray of *array*.

    ``top`` and ``left`` are the first row/column included; ``bottom`` and
    ``right`` are the first row/column excluded.  For instance, given::

        arry = [[1, 2, 3],
                [4, 5, 6],
                [7, 8, 9]]

    the top-left 2x2 corner is::

        >>> crop_array(0, 0, 2, 2, arry)
        [[1, 2],
         [4, 5]]

    Equivalent to ``array.copy()[top:bottom, left:right]``.

    Parameters:
        top: integer — first row index to be included
        left: integer — first column index to be included
        bottom: integer — first row index to be left out
        right: integer — first column index to be left out
        array: array supporting ``.copy()`` and 2-D slicing
    """
    # Copy first so mutations of the crop never touch the caller's data.
    duplicate = array.copy()
    return duplicate[top:bottom, left:right]
|
28da79f3a65a8e2af338ceb128899619c22486bf
| 59,775
|
import click
def selection_prompt(prompt: str, choices: list) -> str:
    """
    Prompts the user to select an option from a list of choices.

    :param prompt: The text of the prompt.
    :param choices: A list of choices.
    :return: The user's selection, casefolded.
    """
    bullet_list = "\n* ".join(choices)
    selection = click.prompt(
        f"\n{prompt}\n\n* {bullet_list}",
        type=click.Choice(choices, case_sensitive=False),
        show_choices=False,
        prompt_suffix="\n\n> ")
    return selection.casefold()
|
e6b8221e790fb11ac217368042f96b52a220b637
| 59,782
|
def is_power(a, b):
    """Check whether ``a`` is a power of ``b``, i.e. a == b**n for some n >= 0.

    Bug fix: the recursive step previously used true division (``a / b``),
    producing floats whose rounding made large-integer cases wrong
    (e.g. ``is_power(3**50, 3)`` returned False).  Floor division keeps the
    arithmetic exact; it equals ``a / b`` whenever ``a % b == 0``, which is
    the only case in which the division is reached.

    :param a: candidate power
    :param b: base
    :return: True if a is a power of b, else False
    """
    if a == 1:
        # b**0 == 1 for any base
        return True
    if a == b:
        # b**1
        return True
    if a == 0 and b != 0:
        return False
    if a != 1 and b == 1:
        return False
    # Strip one factor of b and recurse; exact via floor division.
    if a % b == 0:
        return is_power(a // b, b)
    return False
|
f7be6b794479bb538f569e3d7375d3d9a85a9753
| 59,783
|
def P(name, value):
    """Returns name and value in the format of gradle's project property cli argument."""
    return f'-P{name}={value}'
|
766505d0b7e9e085d3ac97a499f5bbb738b63539
| 59,787
|
def u2nt_time(epoch):
    """
    Convert UNIX epoch time to NT filestamp
    quoting from spec: The FILETIME structure is a 64-bit value
    that represents the number of 100-nanosecond intervals that
    have elapsed since January 1, 1601, Coordinated Universal Time
    """
    # 116444736000000000 = hundred-ns intervals from 1601-01-01 to 1970-01-01.
    hundred_ns = int(epoch * 10000000.0)
    return hundred_ns + 116444736000000000
|
fdb3798fc9f8b141d70b61ff1ae7b643938abf04
| 59,795
|
def read_region(region):
    """Parse 'chrom:start-end' into a {'chrom', 'start', 'end'} dict."""
    chrom, grange = region.split(":")
    bounds = grange.split("-")
    return {"chrom": chrom, "start": int(bounds[0]), "end": int(bounds[1])}
|
3c3dce02009bf04a9be8b9b4e6e241d979170f9b
| 59,799
|
def build_perl_study_url(local_study_pk, edd_hostname, https=False):
    """
    Builds a Perl-style study URL created by the older Perl version of EDD.
    :param local_study_pk: the numeric study primary key
    :param edd_hostname: the host name for EDD
    :param https: True to use HTTPS, False for plain HTTP
    """
    scheme = 'https' if https else 'http'
    return f'{scheme}://{edd_hostname}/Study.cgi?studyID={local_study_pk}'
|
f86133b839ef27b180168ff39d8d9baeacacad3c
| 59,801
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.