content stringlengths 42 6.51k |
|---|
def Cdf(_input_array, x):
    """
    Compute the empirical CDF value of the input samples at point ``x``
    using left-tail counting (P[X <= x]).
    Attributes:
        _input_array: list of data points
        x: current K value
    Return: value of cdf
    """
    # Count samples at or below x; floats keep the division exact in py2/py3.
    n_below = sum(1.0 for sample in _input_array if sample <= x)
    return n_below / len(_input_array)
def _get_component_dropout(dropout_schedule, data_fraction):
    """Retrieve dropout proportion from schedule when data_fraction
    proportion of data is seen. This value is obtained by using a
    piecewise linear function on the dropout schedule.
    This is a module-internal function called by _get_dropout_proportions().
    See help for --trainer.dropout-schedule for how the dropout value
    is obtained from the options.
    Arguments:
        dropout_schedule: A list of (data_fraction, dropout_proportion) values
            sorted in descending order of data_fraction.
        data_fraction: The fraction of data seen until this stage of
            training.
    Returns:
        The dropout proportion (float), linearly interpolated between the
        two schedule points bracketing ``data_fraction``.
    Raises:
        RuntimeError: if no schedule entry has data_fraction <= the input.
    """
    if data_fraction == 0:
        # Dropout at start of the iteration is in the last index of
        # dropout_schedule
        assert dropout_schedule[-1][0] == 0
        return dropout_schedule[-1][1]
    try:
        # Find lower bound of the data_fraction. This is the
        # lower end of the piecewise linear function.
        # Because the schedule is sorted in DESCENDING order of
        # data_fraction, the first matching entry is the tightest bound.
        (dropout_schedule_index, initial_data_fraction,
         initial_dropout) = next((i, tup[0], tup[1])
                                 for i, tup in enumerate(dropout_schedule)
                                 if tup[0] <= data_fraction)
    except StopIteration:
        raise RuntimeError(
            "Could not find data_fraction in dropout schedule "
            "corresponding to data_fraction {0}.\n"
            "Maybe something wrong with the parsed "
            "dropout schedule {1}.".format(data_fraction, dropout_schedule))
    if dropout_schedule_index == 0:
        # Index 0 holds the largest data_fraction; reaching it means we are
        # exactly at the end of training.
        assert dropout_schedule[0][0] == 1 and data_fraction == 1
        return dropout_schedule[0][1]
    # The upper bound of data_fraction is at the index before the
    # lower bound.
    final_data_fraction, final_dropout = dropout_schedule[
        dropout_schedule_index - 1]
    if final_data_fraction == initial_data_fraction:
        assert data_fraction == initial_data_fraction
        return initial_dropout
    assert (initial_data_fraction <= data_fraction < final_data_fraction)
    # Linear interpolation between the bracketing schedule points.
    return ((data_fraction - initial_data_fraction)
            * (final_dropout - initial_dropout)
            / (final_data_fraction - initial_data_fraction)
            + initial_dropout)
def getThrusterFiringIntervalAfter(tfIndices, minDuration=10):
    """Get range of points between first two thruster firings in input.

    Thruster firings tend to cluster, so we don't just want the first
    pair of firings in the array. Better is the first pair of firings that
    are separated by a minimum number of cadences, minDuration.

    Input:
    --------
    tfIndices (1d np array)
        A list of numbers, each number represents the index where a
        thruster firing occurs. This is not a boolean array.

    Optional Input:
    ---------------
    minDuration
        A pair of cadences must be separated by at least this many cadences
        to be returned.

    Returns:
    ----------
    Values for the first two numbers separated by more than minDuration,
    or (None, None) if no such pair exists. A single firing returns
    (firing, -1).

    Example:
    ---------
    ``getThrusterFiringIntervalAfter( [1,3,15,29], 10)``
    returns ``[3,15]``
    """
    numTf = len(tfIndices)
    if numTf == 0:
        return None, None
    if numTf == 1:
        return tfIndices[0], -1
    # BUG FIX: the original loop ran i up to numTf-1 inclusive while reading
    # tfIndices[i+1], raising IndexError whenever no qualifying pair existed.
    # Iterate over adjacent pairs only.
    for i in range(numTf - 1):
        if tfIndices[i] + minDuration < tfIndices[i + 1]:
            return tfIndices[i], tfIndices[i + 1]
    return None, None
def _format_time(seconds):
"""Render the integer number of seconds as a string. Returns a string.
"""
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
hours = int(hours)
minutes = int(minutes)
if hours > 0:
return "%sh %sm %ss" % (hours, minutes, seconds)
if minutes > 0:
return "%sm %ss" % (minutes, seconds)
return "%ss" % seconds |
def parse_config(config: dict) -> dict:
    """
    Parse the raw configuration so the restructure function can understand it.

    Each key of ``config`` is a directory; its value is a list of templates.
    A plain string template keeps its name; a dict template of the form
    ``{'old': ..., 'new': ...}`` represents a rename.

    :param config: unparsed raw dictionary of details
    :return: parsed dictionary mapping original filename ->
        {'dir': directory, 'file': target filename}
    """
    parsed_object = {}
    for directory, templates in config.items():
        for template in templates:
            # isinstance instead of `type(...) == dict` so dict subclasses
            # (OrderedDict, defaultdict, ...) are handled too.
            if isinstance(template, dict):  # renaming of files
                parsed_object[template['old']] = {
                    'dir': directory,
                    'file': template['new']
                }
            else:
                parsed_object[template] = {
                    'dir': directory,
                    'file': template
                }
    return parsed_object
def get_request_string(args, kwargs):
    """
    Given ``args``, ``kwargs`` of original function, returns request string.

    The request string is the second positional argument when present,
    otherwise the 'request_string' keyword (None if absent).
    """
    if len(args) > 1:
        return args[1]
    return kwargs.get('request_string')
def DropStringPrefix(s, prefix):
    """If the string starts with this prefix, drops it."""
    return s[len(prefix):] if s.startswith(prefix) else s
def select_anchors_except_2d(tmp_ips_dict, mode):
    """
    If the selected localization mode is not 2D, anchor selection
    is done under this module.

    Keeps the first (mode + 1) anchors, in received order, mapping each
    anchor id to its [x, y, z] coordinates.
    """
    ids = list(tmp_ips_dict['AnchorID'])
    xs = list(tmp_ips_dict['x'])
    ys = list(tmp_ips_dict['y'])
    zs = list(tmp_ips_dict['z'])
    selected = {}
    # Explicit indexing (rather than slicing) preserves the original
    # IndexError when fewer than mode + 1 anchors were received.
    for j in range(mode + 1):
        selected[ids[j]] = [xs[j], ys[j], zs[j]]
    return selected
def get_sorted_labels(column_label_map):
    """
    Sort labels using their corresponding column names.
    .. function: get_sorted_labels(column_label_map)
    :param column_label_map: The column-name-to-label map.
    :type column_label_map: dict(str, str)
    :return: The sorted labels.
    :rtype: list(str)
    """
    # Iterating sorted keys is equivalent to sorting items by item[0].
    return [column_label_map[name] for name in sorted(column_label_map)]
def derivative_from_polycoefficients(coeff, loc):
    """
    Return derivative of a polynomial of the form
        f(x) = coeff[0] + coeff[1]*x + coeff[2]*x**2 + ...
    at x = loc.
    """
    # Skip n == 0 (constant term): its derivative is zero and including it
    # would evaluate loc**-1, which breaks for loc == 0.
    return sum((n * c * loc ** (n - 1) for n, c in enumerate(coeff) if n), 0.)
def show_slave_delays(slaves, args_array):
    """Method: show_slave_delays
    Description: Stub holder for mysql_rep_failover.show_slave_delays func.
    Arguments:
        (input) slaves
        (input) args_array
    """
    # Stub: inputs are accepted but unused; status is always True.
    status = bool(True)
    return status, "Error Message"
def learning_rate_decay(initial_learning_rate: float, epoch_no: int) -> float:
    """
    Exponentially decay the learning rate by a fixed 0.01 factor per epoch.

    param initial_learning_rate: the learning rate in the previous epoch
    param epoch_no: current epoch_no
    >>> lr = learning_rate_decay(1, 2)
    >>> lr
    0.0001
    """
    DECAY_RATE = 0.01
    return initial_learning_rate * (DECAY_RATE ** epoch_no)
def fib(n):
    """Calculate the n-th Fibonacci number (fib(0) == fib(1) == fib(2) == 1).

    Raises ValueError for negative n.

    Example:
    >>> fib(10)
    55
    >>> fib(-1)
    Traceback (most recent call last):
    ...
    ValueError
    """
    if n < 0:
        raise ValueError('')
    if n <= 2:
        return 1
    # Iterative computation: the original naive double recursion was
    # exponential in n; this is O(n) with the same return values.
    a, b = 1, 1
    for _ in range(n - 2):
        a, b = b, a + b
    return b
def lower(message: str) -> str:
    """Return the message with all cased characters converted to lowercase.

    Args:
        message (str): the message to format.
    Returns:
        str: the formatted message.
    """
    formatted = message.lower()
    return formatted
def filter_annotation_list(annotation_list, key, value_list, filter=True):
    """
    Return the annotation list filtered to entries where _key_ is in
    _value_list_ (or NOT in it, when ``filter`` is False).

    NOTE: the ``filter`` parameter shadows the builtin of the same name;
    it is kept for backward compatibility with existing callers.
    """
    if filter:
        keep = lambda entry: entry[key] in value_list
    else:
        keep = lambda entry: entry[key] not in value_list
    return [entry for entry in annotation_list if keep(entry)]
def _get_dhcpv6_msgtype(msg_index):
    """Return DHCPv6 message type string.
    :param msg_index: Index of message type (1-13).
    :return: Message type.
    :type msg_index: int
    :rtype msg_str: str
    :raises KeyError: if msg_index is not a known DHCPv6 message type.
    """
    # DHCPv6 message-type codes and their names.
    dhcp6_messages = {
        1: "SOLICIT",
        2: "ADVERTISE",
        3: "REQUEST",
        4: "CONFIRM",
        5: "RENEW",
        6: "REBIND",
        7: "REPLY",
        8: "RELEASE",
        9: "DECLINE",
        10: "RECONFIGURE",
        11: "INFORMATION-REQUEST",
        12: "RELAY-FORW",
        13: "RELAY-REPL"
    }
    return dhcp6_messages[msg_index]
def _get_spliceregion(data):
"""
This is a plugin for the Ensembl Variant Effect Predictor (VEP) that
provides more granular predictions of splicing effects.
Three additional terms may be added:
# splice_donor_5th_base_variant : variant falls in the 5th base after the splice donor junction (5' end of intron)
# splice_donor_region_variant : variant falls in region between 3rd and 6th base after splice junction (5' end of intron)
# splice_polypyrimidine_tract_variant : variant falls in polypyrimidine tract at 3' end of intron, between 17 and 3 bases from the end
https://github.com/Ensembl/VEP_plugins/blob/release/89/SpliceRegion.pm
"""
return ["--plugin", "SpliceRegion"] |
def make_header(text, size=80, symbol="-"):
    """Unified way to make header message to CLI.

    :param text: what text to write
    :param size: Length of header decorative line
    :param symbol: What symbol to use to create header
    """
    rule = symbol * size
    return "%s\n%s\n%s\n" % (rule, text, rule)
def remove_spaces(string):
    """Substitute each space with %20 so the string can be used in a URL."""
    return string.replace(' ', '%20')
def Color(red, green, blue):
    """Convert the provided red, green, blue color to a 24-bit color value.

    Each color component should be a value 0-255 where 0 is the lowest
    intensity and 255 is the highest intensity.
    """
    value = red << 16
    value |= green << 8
    value |= blue
    return value
def flatten_list(list_to_flatten):
    """Flatten a list of lists into one single list.

    Parameters
    ----------
    list_to_flatten : list of lists to flatten.

    Returns
    -------
    flat : list. Single flattened list.
    """
    flat = []
    for sublist in list_to_flatten:
        flat.extend(sublist)
    return flat
def encode_file_path(s):
    """Encodes an URL path from internal format for use in disk filenames.

    Currently the identity function: the internal representation needs no
    transformation. If the internal representation of paths ever changes,
    the sibling path-handling functions here must change together with it.
    """
    return s
def check(row):
    """
    Checks for human intervention in a plot.

    Returns True when any disturbance code (DSTRBCD1-3) equals 80.0 or any
    treatment code (TRTCD1-3) is one of 10.0, 30.0 or 50.0; False otherwise.
    """
    # The 12-branch if-chain collapses to two membership scans; columns are
    # read in the same order as before (disturbances first, then treatments).
    disturbance_cols = ('DSTRBCD1', 'DSTRBCD2', 'DSTRBCD3')
    treatment_cols = ('TRTCD1', 'TRTCD2', 'TRTCD3')
    if any(row[col] == 80.0 for col in disturbance_cols):
        return True
    return any(row[col] in (10.0, 30.0, 50.0) for col in treatment_cols)
def not_full_fieldofview(nx, ny, cellsize, fov):
    """
    This has been raised as an interesting test, as if the full field of
    view (FOV) has not been imaged we may want to image the full dataset.
    The imaged FOV information can be estimated using the number of pixels
    and the size of the pixels.

    :param nx: number of pixels in x direction
    :param ny: number of pixels in y direction
    :param cellsize: pixel size (presumably in arcseconds, given the /3600
        conversion -- TODO confirm against callers)
    :param fov: full field of view (same squared-degree units as the
        computed imaged area)
    :returns: True if the imaged area is SMALLER than the full FOV, i.e.
        the full FOV has NOT been imaged; False otherwise.
        (The original docstring stated the opposite of what the code and
        function name express; corrected here.)
    """
    # cellsize/3600 converts arcsec -> degrees; nx*ny*(deg per pixel)^2 is
    # the imaged solid angle.
    return nx * ny * (cellsize / 3600) * (cellsize / 3600) < fov
def _flatten_list(l):
""" convert multiple remotes of obs (each from multiple envs) to 1 list of obs
"""
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_] |
def dynamodb_prewrite_empty_str_in_dict_to_null_transform(d: dict) -> dict:
    """DynamoDB will break if you try to provide an empty string as a
    String value of a key that is used as an index. It requires you to
    provide these attributes as None rather than the empty string.
    It _used_ to break if any attribute in any Map (nested or not) was the
    empty String. This behavior seems to have changed relatively recently.
    This function guards against this issue by simply replacing the
    empty string with None.
    """
    out = {}
    for key, value in d.items():
        if isinstance(value, str) and not value:
            out[key] = None
        else:
            out[key] = value
    return out
def make_photo_dict(filename, media):
    """Generate a photo dict (filename, url) as in the ODKA JSON export.

    Returns None for a falsy filename or one that is not present in media.
    """
    if not filename or filename not in media:
        return None
    return {"filename": filename, "url": media[filename]}
def mac_address_formatter(mac_address):
    """Format a MAC address into Cisco dotted format and return the string.

    Addresses that already contain '.' are returned unchanged.
    """
    if '.' in mac_address:
        return mac_address
    digits = mac_address.replace(':', '').replace('-', '')
    return '.'.join([digits[0:4], digits[4:8], digits[8:12]])
def _check_quantiles(quantiles):
"""Validate quantiles.
Parameters
----------
quantiles : str, list, tuple or None
Either a string or list/tuple of strings indicating the pandas summary
functions ("mean", "min", "max", "median", "sum", "skew", "kurtosis",
"var", "std", "mad", "sem", "nunique", "count") that is used to summarize
each column of the dataset.
Returns
-------
quantiles : list or tuple
The validated quantiles that will be used to summarize the dataset.
"""
msg = """`quantiles` must be int, float or a list or tuple made up of
int and float values that are between 0 and 1.
"""
if isinstance(quantiles, (int, float)):
if not 0.0 <= quantiles <= 1.0:
raise ValueError(msg)
quantiles = [quantiles]
elif isinstance(quantiles, (list, tuple)):
if len(quantiles) == 0 or not all(
[isinstance(q, (int, float)) and 0.0 <= q <= 1.0 for q in quantiles]
):
raise ValueError(msg)
elif quantiles is not None:
raise ValueError(msg)
return quantiles |
def check_all_columns(A):
    """
    Check that no column of the square 2-D matrix contains more than one
    queen (i.e. every column sums to at most 1).
    """
    n = len(A)
    for col in range(n):
        column_total = sum(A[row][col] for row in range(n))
        if column_total > 1:
            return False
    return True
def ae_level(val=None):
    """Set (when ``val`` is given) or get the auto exposure level."""
    global _ae_level
    if val is None:
        # Pure read; raises NameError if the level was never set.
        return _ae_level
    _ae_level = val
    return _ae_level
def calculate_compared_width(y_base, poly):
    """
    Calculate the width of a polygon with respect to the baseline.

    Input
        y_base: y coordinate of the base line
        poly: a sequence of (x, y) vertices of a polygon
    Output
        width: the width of the polygon along the baseline (0 when no edge
        lies on or crosses the baseline)
    """
    width = 0
    crossing_xs = []  # x coordinates where non-horizontal edges meet y_base
    for i in range(len(poly)):
        x1, y1 = poly[i - 1][0], poly[i - 1][1]
        x2, y2 = poly[i][0], poly[i][1]
        if y_base == y1 == y2:
            # Edge lies on the baseline: its horizontal extent counts directly.
            width = max(width, abs(x1 - x2))
        elif y_base != y1 != y2:
            # Linear interpolation of the edge's x at the baseline height
            # (y1 != y2 here, so the division is safe).
            x = (y_base - y2) / (y1 - y2) * (x1 - x2) + x2
            crossing_xs.append(x)
    # BUG FIX: the original called max()/min() unconditionally and raised
    # ValueError when no edge produced a crossing point.
    if crossing_xs:
        width = max(width, max(crossing_xs) - min(crossing_xs))
    return width
def str_to_color(s):
    """Convert a hex string ('abc' or 'aabbcc') to a list of RGB floats."""
    if len(s) == 3:
        # Expand CSS-style shorthand "abc" -> "aabbcc".
        s = ''.join(ch * 2 for ch in s)
    # bytes.fromhex parses pairs of hex digits; scale [0-255] to [0-1].
    return [byte / 255.0 for byte in bytes.fromhex(s)]
def is_al_num(string):
    """
    Check whether a string contains only ASCII letters and digits
    (a-z, A-Z, 0-9). Returns True for the empty string.

    :param string: The string to be processed
    :return: Result
    """
    for ch in string.lower():
        # Direct character-range comparison is equivalent to comparing ord().
        if not ('a' <= ch <= 'z' or '0' <= ch <= '9'):
            return False
    return True
def adapt_cmake_command_to_platform(cmake_command, platform):
    """
    Adapt a CMake command line to the MS Windows platform.

    On win32, leading VAR=value environment tokens are turned into
    'set VAR=value &&' prefixes; other platforms pass through unchanged.
    """
    if platform != 'win32':
        return cmake_command
    pos = cmake_command.find('cmake')
    parts = ['set %s &&' % token for token in cmake_command[:pos].split()]
    parts.append(cmake_command[pos:])
    return ' '.join(parts)
def get_api(kind):
    """Determine the apiVersion for different kinds of resources.

    Args:
        kind (string): The name of the resource
    Returns:
        string: the apiVersion for the matching resource
    Raises:
        ValueError: If apiVersion cannot be determined from Kind
    """
    # supported workloads & their api versions
    api_versions = {
        "DaemonSet": "apps/v1",
        "Deployment": "apps/v1",
        "Job": "batch/v1",
        "Pod": "v1",
        "ReplicaSet": "apps/v1",
        "StatefulSet": "apps/v1",
        "Ingress": "networking.k8s.io/v1beta1",
        "Service": "v1",
        "PersistentVolume": "v1",
        "PersistentVolumeClaim": "v1",
        "Volume": "v1",
        "Namespace": "v1",
        "ConfigMap": "v1",
    }
    try:
        # Direct dict lookup replaces the original O(n) scan over items().
        return api_versions[kind]
    except KeyError:
        # `from None`: the original raised outside any except block, so no
        # exception chaining is expected by callers.
        raise ValueError(f"Could not determine apiVersion from {kind}") from None
def mock_ssh(host, command):
    """Avoid network connection.

    Returns a local shell invocation instead of running over SSH;
    ``host`` is deliberately ignored.
    """
    shell_argv = ["/bin/sh", "-c", command]
    return shell_argv
def userfunc(say='Hi'):
    """Test func with one parameter."""
    return 'SKPAR says %s' % (say,)
def binary_search(items, item):
    """Return True if items includes the item, otherwise False.

    Assume the items are in non-decreasing order.
    Assume item and items are all of the same type.
    """
    lo, hi = 0, len(items) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = items[mid]
        if candidate == item:
            # Found the item at the middle of the current window.
            return True
        if candidate < item:
            # Item, if present, lies in the upper half.
            lo = mid + 1
        else:
            # Item, if present, lies in the lower half.
            hi = mid - 1
    # Search window is empty: the item is not in the list.
    return False
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:
    1) Arguments are delimited by white space, which is either a
       space or a tab.
    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.
    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.
    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.
    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """
    # See
    # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
    # or search http://msdn.microsoft.com for
    # "Parsing C++ Command-Line Arguments"
    result = []
    needquote = False
    for arg in seq:
        # bs_buf accumulates a run of backslashes whose interpretation
        # depends on what follows them (rule 4/5).
        bs_buf = []
        # Add a space to separate this argument from the others
        if result:
            result.append(' ')
        # An empty argument must also be quoted to survive the round trip.
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')
        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backslashes (rule 5), then escape the quote (rule 3).
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)
        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)
        if needquote:
            # Trailing backslashes must be doubled before the closing quote.
            result.extend(bs_buf)
            result.append('"')
    return ''.join(result)
def xml_safe(s):
    """Returns the XML-safe version of a given string.

    Ampersands and left angle brackets are escaped to XML entities,
    carriage returns are dropped, and newlines become ``<br/>`` tags.
    """
    # BUG FIX: the escape targets had been corrupted into no-op
    # replacements (e.g. replace("&", "&")), apparently by an
    # HTML-unescaping pass; restored to the standard XML entities.
    new_string = s.replace("&", "&amp;").replace("<", "&lt;")
    new_string = new_string.replace("\r", "").replace("\n", "<br/>")
    return new_string
def split_checkpoint_step(checkpoint_dir):
    """Helper function to return the checkpoint index number.

    Args:
        checkpoint_dir: Path directory of the checkpoints
    Returns:
        checkpoint_id: An int representing the checkpoint index
    """
    # Take the path's basename, then the text after the last '-'.
    basename = checkpoint_dir.rsplit('/', 1)[-1]
    return int(basename.rsplit('-', 1)[-1])
def video_sort(videos, key, keyType=str, reverse=False):
    """
    Given a list of video records (dotty dictionaries), return a sorted copy
    ordered by the given key. Keys are converted with ``keyType`` before
    comparison. A very thin wrapper over ``sorted``; filtering may be added
    here in the future.
    """
    def sort_key(record):
        return keyType(record[key])
    return sorted(videos, key=sort_key, reverse=reverse)
def is_connection_error(error):
    """Return True if the error message indicates connection issues."""
    msg = str(error)
    markers = ('Unhealthy connection', 'No connection exists')
    return any(marker in msg for marker in markers)
def is_around_angle(test, angle, offset):
    """
    Check whether a test angle is within a tolerance of a reference angle.

    Parameters
    ----------
    test : float
        Angle to test in Degrees.
    angle : float
        Angle to compare in Degrees.
    offset : float
        Tolerance around 'angle' in degrees.

    Returns
    -------
    bool
        True if it is in the range [angle-offset, angle+offset].
    """
    lower, upper = angle - offset, angle + offset
    return lower <= test <= upper
def reverse(s):
    """Return the string reversed.

    The original built the result with one recursive call per character,
    which is O(n^2) in string copies and hits the recursion limit on long
    inputs; a reversed slice does the same job in one C-level pass.
    """
    return s[::-1]
def ip4_hex(arg, delimiter=""):
    """Convert a dotted-quad IPv4 address to hexadecimal notation."""
    octets = [int(piece) for piece in arg.split(".")]
    pattern = "{0:02x}{sep}{1:02x}{sep}{2:02x}{sep}{3:02x}"
    return pattern.format(*octets, sep=delimiter)
def token_from_http_body(http_body):
    """Extract the AuthSub token from an HTTP body string.

    Used to find the new session token after making a request to upgrade a
    single-use AuthSub token.

    Args:
        http_body: str The response from the server which contains the
            AuthSub key, e.g. the server's reply to an upgrade-token request.

    Returns:
        The raw token value to use in an AuthSubToken object, or None if
        no 'Token=' line is present.
    """
    prefix = 'Token='
    for line in http_body.splitlines():
        if line.startswith(prefix):
            # Drop the 'Token=' prefix and return the raw value.
            return line[len(prefix):]
    return None
def megahex_count(radius):
    """
    Counts from zero, so megahex radius 1 -> 6.
    Computes the maximum number on a given level.
    Based on the centered hexagonal numbers 3*n*(n+1)+1, minus the center
    (hence no +1 term here).
    """
    return radius * (radius + 1) * 3
def f1_score(real_labels, predicted_labels):
    """
    Information on F1 score - https://en.wikipedia.org/wiki/F1_score

    :param real_labels: List[int] of 0/1 ground-truth labels
    :param predicted_labels: List[int] of 0/1 predictions
    :return: float
    :raises ZeroDivisionError: when there are no positives anywhere
        (tp == fp == fn == 0), matching the original behavior.
    """
    assert len(real_labels) == len(predicted_labels)
    # F1-score = 2 * (precision * recall) / (precision + recall)
    #          = tp / (tp + 1/2 * (fp + fn))
    tp = fp = fn = 0
    # zip replaces the original index-based range(len(...)) loop.
    for real, pred in zip(real_labels, predicted_labels):
        if real == 1 and pred == 1:
            tp += 1          # true positive
        elif real == 1 and pred == 0:
            fn += 1          # false negative
        elif real == 0 and pred == 1:
            fp += 1          # false positive
    return float(tp) / (tp + 0.5 * (fp + fn))
def my_lcs(string, sub):
    """
    Calculates longest common subsequence for a pair of tokenized strings
    :param string : list of str : tokens from a string split using whitespace
    :param sub : list of str : shorter string, also split using whitespace
    :returns: length (int): length of the longest common subsequence between the two strings
    Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
    """
    # Ensure `sub` is the shorter of the two sequences.
    if(len(string)< len(sub)):
        sub, string = string, sub
    # lengths[i][j] = LCS length of string[:i] and sub[:j] (classic DP table).
    lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]
    for j in range(1,len(sub)+1):
        for i in range(1,len(string)+1):
            if(string[i-1] == sub[j-1]):
                # Tokens match: extend the diagonal subsequence by one.
                lengths[i][j] = lengths[i-1][j-1] + 1
            else:
                # Otherwise carry the best of dropping one token from either side.
                lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])
    return lengths[len(string)][len(sub)]
def interpret_conf_limits(conf, name_prefix, info=None):
    """
    Parses general parms for rate limits looking for things that
    start with the provided name_prefix within the provided conf
    and returns lists for both internal use and for /info
    :param conf: conf dict to parse
    :param name_prefix: prefix of config parms to look for
    :param info: set to return extra stuff for /info registration
    :returns: list of (size, rate, line_function) tuples sorted by size;
        when ``info`` is not None, a 2-tuple of that list and the raw
        (size, rate) pairs for /info.
    """
    conf_limits = []
    for conf_key in conf:
        if conf_key.startswith(name_prefix):
            # The container size is encoded in the key's numeric suffix.
            cont_size = int(conf_key[len(name_prefix):])
            rate = float(conf[conf_key])
            conf_limits.append((cont_size, rate))
    conf_limits.sort()
    ratelimits = []
    conf_limits_info = list(conf_limits)
    while conf_limits:
        cur_size, cur_rate = conf_limits.pop(0)
        if conf_limits:
            next_size, next_rate = conf_limits[0]
            slope = (float(next_rate) - float(cur_rate)) \
                / (next_size - cur_size)
            def new_scope(cur_size, slope, cur_rate):
                # making new scope for variables
                # (binds the loop variables as defaults-by-argument so each
                # lambda keeps its own values rather than the last iteration's)
                return lambda x: (x - cur_size) * slope + cur_rate
            line_func = new_scope(cur_size, slope, cur_rate)
        else:
            # Last segment: constant rate beyond the largest size. The free
            # variable cur_rate is safe here because the loop ends now.
            line_func = lambda x: cur_rate
        ratelimits.append((cur_size, cur_rate, line_func))
    if info is None:
        return ratelimits
    else:
        return ratelimits, conf_limits_info
def filefrac_to_year_monthday_fracday(filefracday):
    """Split a 'YYYYMMDDFFFFFF' name into (YYYY, MM, DD, FFFFFF) strings,
    where FFFFFF is the fraction within the day."""
    year = filefracday[:4]
    month = filefracday[4:6]
    day = filefracday[6:8]
    fracday = filefracday[8:]
    return year, month, day, fracday
def get_palette(num_cls):
    """
    Returns the color map for visualizing the segmentation mask.
    Args:
        num_cls: Number of classes
    Returns:
        The color map: a flat [R0, G0, B0, R1, G1, B1, ...] list of length
        3 * num_cls. Class 0 maps to black; colors for other labels are
        derived by spreading the label's bits across the RGB channels
        (the PASCAL VOC-style bit-reversal scheme, presumably -- matches
        that construction but unverified here).
    """
    n = num_cls
    palette = [0] * (n * 3)
    for j in range(0, n):
        lab = j
        palette[j * 3 + 0] = 0
        palette[j * 3 + 1] = 0
        palette[j * 3 + 2] = 0
        i = 0
        while lab:
            # Take the low 3 bits of lab, one per RGB channel, and place
            # them from the most-significant bit (7 - i) downwards.
            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    return palette
def choose(paragraphs, select, k):
    """Return the Kth paragraph from PARAGRAPHS for which SELECT called on the
    paragraph returns True. If there are fewer than K such paragraphs, return
    the empty string.
    Arguments:
        paragraphs: a list of strings
        select: a function that returns True for paragraphs that can be selected
        k: an integer
    >>> ps = ['hi', 'how are you', 'fine']
    >>> s = lambda p: len(p) <= 4
    >>> choose(ps, s, 0)
    'hi'
    >>> choose(ps, s, 1)
    'fine'
    >>> choose(ps, s, 2)
    ''
    """
    # BEGIN PROBLEM 1
    matched = 0
    for paragraph in paragraphs:
        if not select(paragraph):
            continue
        if matched == k:
            return paragraph
        matched += 1
    return ''
    # END PROBLEM 1
def bytes2MiB(bytes):
    """
    Convert bytes to MiB.

    :param bytes: number of bytes
    :type bytes: int
    :return: MiB
    :rtype: float
    """
    # 1 MiB == 2**20 bytes.
    return bytes / (1 << 20)
def areaTriangulo(base, altura):
    """Compute the area of a triangle given its base width and height.

    Args:
        base (float): the value for the width of the triangle
        altura (float): the value for the height of the triangle
    Returns:
        float: The area of the triangle
    """
    return base * altura / 2
def _trimDict(row, column, evelist):
"""
function used to get only desired values form dictionary
"""
temdict = {k: row[column].get(k, None) for k in evelist}
dictout = {k: v for k, v in temdict.items() if not v is None}
return dictout |
def Question2(a):
    """Given a string a, find the longest palindromic substring contained in a. Your function definition should look like question2(a), and return a string.

    Implementation: Manacher's algorithm in O(n) on a transformed string.
    """
    if len(a) == 0:
        return "No String found"
    # Interleave '*' separators with '$'/'#' sentinels so even- and
    # odd-length palindromes are handled uniformly and the expansion loop
    # never runs off either end.
    s='$*'+'*'.join(a)+'*#'
    # p[i] = palindrome radius centered at position i of the transformed string.
    p=[0]*len(s)
    mirr,C,R,maxLPSIndex,maxLPS=0,0,0,0,0 #mirror #centerPositio #centerRightPosition
    for i in range(1,len(s)-1):
        mirr= 2 * C - i
        if R > i:
            # Reuse the mirrored center's radius, clipped to the right edge R.
            p[i]=min(R - i,p[mirr])
        # Expand the palindrome around i as far as characters match.
        while s[i+(p[i]+1)] == s[i - (p[i]+1)]:
            p[i]+=1
        if i + p[i] > R:
            # This palindrome extends past R: adopt it as the new center.
            C = i
            R = i+p[i]
        if p[i] > maxLPS:
            maxLPS=p[i]
            maxLPSIndex=i
    return "Input String :{} \n Longest Palindromic SubString {}".format(a,s[maxLPSIndex - p[maxLPSIndex]:maxLPSIndex + 1 +p[maxLPSIndex]].replace('*',''))
def deployment_option_validator(x):
    """
    Property: DeploymentStyle.DeploymentOption
    """
    valid_values = ["WITH_TRAFFIC_CONTROL", "WITHOUT_TRAFFIC_CONTROL"]
    if x in valid_values:
        return x
    raise ValueError(
        "Deployment Option value must be one of: %s" % ", ".join(valid_values)
    )
def obj2vl(spec):
    """Reverse operator for vl2obj: rebuild the 'encoding' entry as a
    channel-keyed mapping; every other field is copied through unchanged."""
    vl_spec = {}
    for field_name, field_value in spec.items():
        if field_name != "encoding":
            vl_spec[field_name] = field_value
            continue
        channels = {}
        for entry in field_value:
            enc = entry.copy()
            # The 'channel' key becomes the mapping key, removed from the value.
            channel = enc.pop("channel", None)
            channels[channel] = enc
        vl_spec[field_name] = channels
    return vl_spec
def not_none(seq):
    """Returns True if no item in seq is None."""
    return all(item is not None for item in seq)
def bytes_to_int_big_endian(data: bytes) -> int:
    """Convert bytes to an integer in big-endian byte order.

    Delegates to the C-implemented ``int.from_bytes`` instead of the
    original per-byte shift-and-or loop; empty input yields 0, as before.
    """
    return int.from_bytes(data, "big")
def get_3x3_homothety(x, y, z):
    """Return a 3x3 homothety (scaling) matrix with x, y, z on the diagonal."""
    return [
        [x, 0, 0],
        [0, y, 0],
        [0, 0, z],
    ]
def _make_barrons_translation(x):
"""Apply function for a custom mapping of a text Barron's field to
a number"""
bar_dict = {
"Most Competitive+": 1,
"Most Competitive": 2,
"Highly Competitive": 3,
"Very Competitive": 4,
"Competitive": 5,
"Less Competitive": 6,
"Noncompetitive": 7,
"2 year (Noncompetitive)": 8,
"2 year (Competitive)": 8,
"Not Available": "N/A",
}
if x in bar_dict:
return bar_dict[x]
else:
return "?" |
def normalise_paths(paths):
    """Test normalising paths.

    NB Paths on different platforms might look different, so this maps
    both '/' and '\\' separators to '.' to make them comparable.
    """
    normalised = set()
    for pth in paths:
        normalised.add(pth.replace("/", ".").replace("\\", "."))
    return normalised
def slice2limits(slices):
    """
    Create a tuple of minimum, maximum limits from a set of slices.

    Parameters
    ----------
    slices : list
        List of slice objects which return points between limits

    Returns
    -------
    limits : tuple, (list, list)
        Two-tuple of the minimum and maximum (inclusive) indices.
        NOTE: the original docstring claimed ndarrays were returned; the
        function builds and returns plain lists.

    See Also
    --------
    limits2slice : Find a list of slices given minimum and maximum limits.
    """
    mins = [s.start for s in slices]
    # slice.stop is exclusive; subtract 1 for the inclusive maximum.
    maxs = [s.stop - 1 for s in slices]
    return mins, maxs
def bc(val):
    """Convert a bool to the single letter T or F.

    Uses an identity check, so only the actual True singleton yields "T".
    """
    return "T" if val is True else "F"
def levenshtein(s1: str, s2: str) -> int:
    """
    Pythonic levenshtein math to quickly determine how many "edits" two strings are
    differently than one another.
    Code snippet by Halfdan Ingvarsson
    :param s1: String to compare
    :param s2: String to compare
    :return: int - number of edits required (higher number means more different)
    """
    if len(s1) < len(s2):
        # Keep the shorter string as s2 so the DP rows stay small.
        return levenshtein(s2, s1)
    # len(s1) >= len(s2)
    if len(s2) == 0:
        return len(s1)
    # Rolling two-row dynamic program over the edit-distance matrix.
    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            # j+1 instead of j since previous_row and current_row are one character longer
            # than s2
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
def passed(test_list):
    """Find the number of passed tests from a list
    [testsRuns, testFailures, testErrors]."""
    runs, failures, errors = test_list[0], test_list[1], test_list[2]
    return runs - (failures + errors)
def float2str(flt, separator=".", precision=None, prefix=None, suffix=None):
    """
    Converts a floating point number into a string.

    Contains numerous options on how the output string should be
    returned, including prefixes, suffixes, floating point precision,
    and alternative decimal separators.

    Parameters
    ----------
    flt : float or int
        The number to convert.
    separator : str
        Replacement for the decimal point.
    precision : int, optional
        Number of digits after the decimal point.
    prefix : str, optional
        String prepended to the result.
    suffix : str, optional
        String appended to the result.

    Returns
    -------
    string : str
        A string representation of the floating point number.

    Examples
    --------
    >>> float2str(23)
    '23'
    >>> float2str(23.5)
    '23.5'
    >>> float2str(23.5, separator="p")
    '23p5'
    >>> float2str(23.5, precision=4)
    '23.5000'
    >>> float2str(23.501345, precision=4)
    '23.5013'
    >>> float2str(23.5, precision=0)
    '24'
    >>> float2str(23.5, prefix='z', separator='p')
    'z23p5'
    >>> float2str(23.5, prefix='z', separator='p', suffix='dex')
    'z23p5dex'
    """
    if isinstance(precision, int):
        str_number = "{num:.{pre}f}".format(num=flt, pre=precision)
    else:
        str_number = str(flt)
    # BUG FIX: the original used `separator is not "."`, an identity
    # comparison against a string literal (implementation-dependent and a
    # SyntaxWarning on modern Python); equality is what is meant. The last
    # doctest was also a SyntaxError (`prefix 'z'` missing the `=`).
    if separator != ".":
        # Split number around the decimal point and rejoin with the
        # requested separator.
        number_parts = str_number.split(".")
        string = separator.join(number_parts)
    else:
        string = str_number
    if isinstance(prefix, str):
        string = "".join([prefix, string])
    if isinstance(suffix, str):
        string = "".join([string, suffix])
    return string
def calcular_distancia_entre_puntos(x1, x2, y1, y2):
    """Compute the Euclidean distance between two points.

    param: x1: longitude of point 1
    param: x2: longitude of point 2
    param: y1: latitude of point 1
    param: y2: latitude of point 2
    """
    dx = x2 - x1
    dy = y2 - y1
    return (dx ** 2 + dy ** 2) ** 0.5
def multi_column_fields(fields, form):
    """
    Return a dict of the fields that justify more than one column: each key
    is a field name, each value the set of (more than one) distinct values
    seen for it across all records in the form. Most fields see at most one
    value, but a List (multiple choice) field may see several.
    """
    value_sets = {}
    for name, _field in fields:
        values = set()
        for record in form.values():
            val = getattr(record, name, None)
            # Exact-type check (not isinstance), as in the original contract:
            # only plain list/tuple/set values contribute; anything else
            # (e.g. None) is ignored.
            if type(val) in (list, tuple, set):
                values.update(val)
        # Drop placeholder values that shouldn't count as columns.
        values.discard('')
        values.discard(None)
        value_sets[name] = values
    return {name: vals for name, vals in value_sets.items() if len(vals) > 1}
def config_id(c):
    """
    Generates a unique name for each configuration.

    Parameters
    ----------
    c: dict
        A valid configuration for FastTextHandler

    Returns
    -------
    A name for the input configuration `c`: sorted 'key=value' parts joined
    by '_', skipping keys that start with an underscore.
    """
    # Dict keys are unique, so sorting items by natural tuple order is
    # equivalent to sorting by key alone.
    pairs = sorted(c.items())
    return "_".join("{0}={1}".format(k, v) for k, v in pairs if k[0] != '_')
def apply_label(row, labels, is_fabs):
    """ Get special rule labels for required or type checks for FABS submissions.

        Args:
            row: the dataframe row to get the label for
            labels: the list of labels that could be applied in this rule
            is_fabs: a boolean indicating if the submission is a FABS submission or not

        Returns:
            The label if it's a FABS submission and the header matches one of the ones there are labels for, empty
            string otherwise
    """
    if not is_fabs or not labels:
        return ''
    return labels.get(row['Field Name'], '')
def containedin(passwd, chars):
    """
    Return True when every character of the password appears in the
    allowed character collection ``chars``.
    """
    return all(ch in chars for ch in passwd)
def bin(x, digits=0):
    """Get the binary for a decimal input.

    Args:
        x: Decimal (non-negative integer) input.
        digits: Number of digits for zero-padding.

    Returns:
        A binary string, padded with leading zeros if necessary.

    Note: the previous octal-digit lookup returned '' for x == 0 and
    crashed on negative inputs; format(x, 'b') fixes both ('0' for zero,
    '-...' for negatives) while matching the old output elsewhere.
    """
    # NOTE: this intentionally shadows the builtin ``bin`` in this module,
    # matching the original interface.
    return format(x, 'b').zfill(digits)
def KK_RC23_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-
    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)

    Computes Rs + sum over 23 parallel RC elements:
        Rs + R_i / (1 + j*w*t_i)  for i = 1..23

    Args:
        params: mapping supporting item access with keys "Rs", "R1".."R23".
        w: angular frequency (scalar or array).
        t_values: sequence of at least 23 time constants.
    """
    # Accumulate in the same left-to-right order as the original explicit
    # 24-term expression so the floating-point result is identical.
    impedance = params["Rs"]
    for i in range(23):
        impedance = impedance + params["R{0}".format(i + 1)] / (1 + w * 1j * t_values[i])
    return impedance
def _unpack_uint32(data):
"""Convert 4 bytes in little-endian to an integer."""
assert len(data) == 4
return int.from_bytes(data, 'little') |
def _get_laser_bias(time, campaigns, bias):
""" Map time (yr) to campaign to correction. """
camp = [ca for (t1, t2, ca) in campaigns if t1 <= time < t2] # ['c']|[]
laser = camp[0][:2] if camp else None
return bias[laser] |
def merge_rules(a, b):
    """Merge two rule dicts; on key conflicts, values from ``b`` win."""
    merged = a.copy()
    merged.update(b)
    return merged
def _valid_color(col):
"""Checks whether an rgba value is a valid color or not."""
for c in col:
if c < 0 or c > 1:
return False
return True |
def has_id(obj):
    """
    Checks if the object exposes a getID method (via dir(), so dynamic
    __getattr__-only attributes are not detected).

    :param obj: Object to check
    :return: <Boolean>
    """
    return 'getID' in dir(obj)
def get_setup(job=None):
    """
    Return the resource specific setup.
    :param job: optional job object.
    :return: setup commands (list).
    """
    # Shell commands for the ORNL Titan MCORE resource; the list ends with a
    # single-space entry, presumably a separator for downstream joining —
    # TODO confirm against the caller that assembles these into a script.
    setup_commands = ['source /ccs/proj/csc108/athena_grid_env/setup.sh',
                      'source $MODULESHOME/init/bash',
                      # Scratch/temp directory layout on the worker node.
                      'tmp_dirname=/tmp/scratch',
                      'tmp_dirname+="/tmp"',
                      'export TEMP=$tmp_dirname',
                      'export TMPDIR=$TEMP',
                      'export TMP=$TEMP',
                      'export LD_LIBRARY_PATH=/ccs/proj/csc108/AtlasReleases/ldpatch:$LD_LIBRARY_PATH',
                      'export ATHENA_PROC_NUMBER=16',
                      'export G4ATLAS_SKIPFILEPEEK=1',
                      # PanDA/Rucio identification for this resource.
                      'export PANDA_RESOURCE=\"ORNL_Titan_MCORE\"',
                      'export ROOT_TTREECACHE_SIZE=1',
                      'export RUCIO_APPID=\"simul\"',
                      'export RUCIO_ACCOUNT=\"pilot\"',
                      # Database/conditions access paths.
                      'export CORAL_DBLOOKUP_PATH=/ccs/proj/csc108/AtlasReleases/21.0.15/nfs_db_files',
                      'export CORAL_AUTH_PATH=$SW_INSTALL_AREA/DBRelease/current/XMLConfig',
                      'export DATAPATH=$SW_INSTALL_AREA/DBRelease/current:$DATAPATH',
                      'unset FRONTIER_SERVER',
                      ' ']
    return setup_commands
def _normalize_name(name):
""" Return normalized event/function name. """
if '(' in name:
return name[:name.find('(')]
return name |
def _bunch(x, cls):
""" Recursively transforms a dictionary into a Bunch via copy.
>>> b = _bunch({'urmom': {'sez': {'what': 'what'}}}, BunchDict)
>>> b.urmom.sez.what
'what'
bunchify can handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = _bunch({ 'lol': ('cats', {'hah':'i win'}), 'hello': [{'french':'salut', 'german':'hallo'}]}, BunchDict)
>>> b.hello[0].french
'salut'
>>> b.lol[1].hah
'i win'
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
if isinstance(x, dict):
return cls((k, _bunch(v, cls)) for k, v in x.items())
elif isinstance(x, (list, tuple)):
return type(x)(_bunch(v, cls) for v in x)
else:
return x |
def _fixops(x):
"""Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
handled well by our simple top-down parser"""
if not isinstance(x, tuple):
return x
op = x[0]
if op == 'parent':
# x^:y means (x^) : y, not x ^ (:y)
# x^: means (x^) :, not x ^ (:)
post = ('parentpost', x[1])
if x[2][0] == 'dagrangepre':
return _fixops(('dagrange', post, x[2][1]))
elif x[2][0] == 'rangepre':
return _fixops(('range', post, x[2][1]))
elif x[2][0] == 'rangeall':
return _fixops(('rangepost', post))
elif op == 'or':
# make number of arguments deterministic:
# x + y + z -> (or x y z) -> (or (list x y z))
return (op, _fixops(('list',) + x[1:]))
return (op,) + tuple(_fixops(y) for y in x[1:]) |
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend(
[intervalx.start, intervalx.start, intervalx.end, intervalx.end, None]
)
ylist.extend(
[intervaly.start, intervaly.end, intervaly.end, intervaly.start, None]
)
else:
# XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist |
def linear(mu, c, i0=1.0):
    """
    Intensity of a cell on the stellar surface under a linear
    limb-darkening law: I(mu) = i0 * (1 - c * (1 - mu)).

    Parameters
    ----------
    mu (``float`` or ``numpy.ndarray``):
        Cosine of the angle between a line normal to the stellar surface
        and the line of sight.
    c (``float``):
        Limb-darkening coefficient.
    i0 (``float``, optional):
        Intensity without limb-darkening. Default is 1.0.

    Returns
    -------
    i_mu (``float`` or ``numpy.ndarray``):
        Intensity with limb-darkening, same format as the input ``mu``.
    """
    return i0 * (1 - c * (1 - mu))
def encode_rle(input):
    """
    Compress a stream of data with Run-Length Encoding.

    :param input: The data to be encoded (string or sequence of characters).
    :return: The encoded string, e.g. 'aaab' -> '3a1b'.
    """
    if not input:
        return ""
    pieces = []
    run_char = input[0]
    run_len = 0
    for ch in input:
        if ch == run_char:
            run_len += 1
        else:
            # Run ended: flush it and start counting the new character.
            pieces.append(str(run_len) + run_char)
            run_char = ch
            run_len = 1
    # Flush the final run.
    pieces.append(str(run_len) + run_char)
    return "".join(pieces)
def to_camel_case(text):
    """Convert an underscore-separated name to camelCase.

    >>> to_camel_case('example_code')
    'exampleCode'

    Args:
        - text: str
    Return: camel case version of the string
    """
    head, *tail = text.split('_')
    return head + "".join(word.title() for word in tail)
def get_outer_grid(coordinate_list):
    """Return ((min_x, min_y), (max_x, max_y)) of an outer grid that
    surrounds all coordinates with a 2-cell margin, for ease of plotting."""
    xs = sorted(point[0] for point in coordinate_list)
    ys = sorted(point[1] for point in coordinate_list)
    return ((xs[0] - 2, ys[0] - 2), (xs[-1] + 2, ys[-1] + 2))
def _list_to_string(l, s):
"""Concatenates list items into a single string separated by `s`.
Args:
l: List with items to be concatenated into a single string.
s: String or char that will be concatenated in between each item.
Returns:
String that has all items in list `l` concatenated with `s` separator.
"""
return s.join(l) |
def _solve_method_2(a):
"""Use a dictionary."""
d = dict()
for item in a:
if not item in d:
d[item] = 1
else:
d[item] += 1
for k, v in d.items():
if v == 1:
return k |
def lower_keys(x):
    """Recursively lower-case all dict keys (descending into lists too)."""
    if isinstance(x, list):
        return [lower_keys(item) for item in x]
    if isinstance(x, dict):
        return {key.lower(): lower_keys(value) for key, value in x.items()}
    return x
def get_quartile_data(number_of_simulations):
    """ Take in the number of simulations and return the (lower, middle,
    upper) quartile line numbers as a tuple. """
    # One "increment" is 1% of the simulations (rounded); quartiles sit at
    # 25, 50 and 75 increments.
    step = round(number_of_simulations / 100)
    return (round(step * 25), round(step * 50), round(step * 75))
def dedupList(seq):
    """De-duplicate a list, keeping the first occurrence of each element.

    Uses dict.fromkeys instead of a set round-trip so the result order is
    deterministic (the original set-based version returned elements in
    arbitrary order). Elements must be hashable, as before.
    """
    return list(dict.fromkeys(seq))
def to_bool(value):
    """
    Converts 'something' to boolean. Raises ValueError for invalid formats
    (ValueError subclasses Exception, so existing broad handlers still work).
    Possible True values: 1, True, "1", "TRue", "yes", "y", "t"
    Possible False values: 0, False, None, [], {}, "", "0", "faLse", "no", "n", "f", 0.0, ...
    URL: http://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python
    """
    # Normalize once instead of recomputing str(value).lower() per branch.
    text = str(value).lower()
    if text in ("yes", "y", "true", "t", "1"):
        return True
    if text in ("no", "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"):
        return False
    raise ValueError('Invalid value for boolean conversion: ' + str(value))
def ListUnion(list_1, list_2):
    """Returns the union of two lists, preserving first-seen order.

    Python sets can have a non-deterministic iteration order, which could
    make TensorFlow produce two different programs from the same script;
    in those contexts lists are used instead of sets. Uniqueness is
    determined by "x in union" (==) logic, so two equal elements appear
    only once in the result. Not designed to be fast; use on small lists.

    Args:
        list_1: A list
        list_2: Another list

    Returns:
        A new list with one copy of each unique element of list_1 and list_2.

    Raises:
        TypeError: The arguments are not lists.
    """
    if not isinstance(list_1, list) or not isinstance(list_2, list):
        raise TypeError("Arguments must be lists.")
    union = []
    for item in list_1:
        if item not in union:
            union.append(item)
    for item in list_2:
        if item not in union:
            union.append(item)
    return union
def parenthesis_aware_split(string, delim=',', open_par='(', close_par=')'):
    """ Split on ``delim`` only outside parenthesis (delimiters nested
    inside ``open_par``/``close_par`` pairs are kept verbatim)."""
    parts = []
    current = []
    depth = 0
    for ch in string:
        if ch == open_par:
            depth += 1
        if ch == close_par and depth > 0:
            depth -= 1
        if ch == delim and depth == 0:
            parts.append(''.join(current))
            current = []
        else:
            current.append(ch)
    parts.append(''.join(current))
    return parts
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.