content stringlengths 42 6.51k |
|---|
def phpencode(text, quotechar="'"):
    """Convert a Python string to PHP escaping.

    Implements the escaping rules for PHP 'single quote' and "double quote"
    string syntax (see the PHP manual on string types). heredoc and nowdoc
    are not handled, as they are unlikely to matter for PHP localisation.
    """
    if not text:
        return text
    if quotechar != '"':
        # Single-quote syntax: only the quote character itself is escaped.
        return text.replace("%s" % quotechar, "\\%s" % quotechar)
    # Double-quote syntax. \n is deliberately NOT escaped so that pretty
    # multiline layouts are preserved (see bug 588). The order of the pairs
    # matters: the final ("\\\\", "\\") pass collapses doubling introduced
    # by the first pass.
    ordered_escapes = (
        ("\\", "\\\\"),
        ("\r", "\\r"),
        ("\t", "\\t"),
        ("\v", "\\v"),
        ("\f", "\\f"),
        ("\\\\$", "\\$"),
        ('"', '\\"'),
        ("\\\\", "\\"),
    )
    for plain, escaped in ordered_escapes:
        text = text.replace(plain, escaped)
    return text
def _is_oom_error(error: RuntimeError) -> bool:
    """Check whether a runtime error was caused by insufficient memory.

    Recognizes both the CUDA OOM message and the CPU allocator failure
    message emitted by PyTorch.
    """
    # BUG FIX: error.args may be empty (RuntimeError()) or hold a non-string
    # payload; either case previously raised (IndexError / TypeError)
    # instead of returning False.
    if not error.args or not isinstance(error.args[0], str):
        return False
    message = error.args[0]
    # CUDA out of memory
    if 'CUDA out of memory.' in message:
        return True
    # CPU out of memory
    if "[enforce fail at CPUAllocator.cpp:64] . DefaultCPUAllocator: can't allocate memory:" in message:
        return True
    return False
def centroid_points_xy(points):
    """Compute the centroid of a set of points lying in the XY-plane.

    Only the first two coordinates of each point are used; any Z value is
    ignored and the result always has Z = 0.0.

    Warning
    -------
    Duplicate points are **NOT** removed. If there are duplicates in the
    sequence, they should be there intentionally.

    Parameters
    ----------
    points : list of list
        A sequence of points represented by their XY(Z) coordinates.

    Returns
    -------
    list
        XYZ coordinates of the centroid (Z = 0.0).

    Examples
    --------
    >>> centroid_points_xy([[0.0, 0.0], [2.0, 0.0], [2.0, 2.0], [0.0, 2.0]])
    [1.0, 1.0, 0.0]
    """
    p = len(points)
    x, y = list(zip(*points))[:2]
    return [sum(x) / p, sum(y) / p, 0.0]
def get_runner_image_name(base_images_project, test_image_suffix):
    """Return the base-runner image path for |base_images_project|.

    When |test_image_suffix| is non-empty, the testing variant of the image
    (name suffixed with '-<suffix>') is returned instead.
    """
    runner = f'gcr.io/{base_images_project}/base-runner'
    if not test_image_suffix:
        return runner
    return f'{runner}-{test_image_suffix}'
def possible_records(games):
    """Return every possible (wins, losses) record for a season.

    Input:
        games (integer) - number of games played
    Returns:
        records (list of tuples) - all (wins, losses) pairs summing to games
    """
    return [(wins, games - wins) for wins in range(games + 1)]
def map_type(name, field_name):
    """Map a Sysmon event type to the corresponding VAST type.

    Integers are special-cased: the two well-known port fields become
    "port", every other integer becomes "count". Unknown type names raise
    NotImplementedError.
    """
    if name == "integer":
        return "port" if field_name in ("SourcePort", "DestinationPort") else "count"
    direct = {
        "string": "string",
        "date": "timestamp",
        "boolean": "bool",
        "bool": "bool",
        "ip": "addr",
    }
    if name in direct:
        return direct[name]
    raise NotImplementedError(name)
def cal_sort_key(cal):
    """Sort key for the list of calendars.

    Primary calendar first, then other selected calendars, then unselected
    calendars. Works because " " sorts before "X" and tuples compare
    piecewise.
    """
    primary_key = " " if cal["primary"] else "X"
    selected_key = " " if cal["selected"] else "X"
    return (primary_key, selected_key, cal["summary"])
def add_padding(main_str, padding_str, padding_length, inverse_padding=False):
    """Pad a string so its length is a multiple of padding_length.

    Args:
        main_str (str): string to add padding to
        padding_str (str): padding character as string
        padding_length (int): final length must be a multiple of this
        inverse_padding (bool): prepend the padding instead of appending

    Returns:
        str: resulting string with padding (unchanged if already aligned)
    """
    shortfall = len(main_str) % padding_length
    if not shortfall:
        return main_str
    pad = padding_str * (padding_length - shortfall)
    return pad + main_str if inverse_padding else main_str + pad
def is_predicate(data: str) -> bool:
    """Return True when *data* is a logical-form predicate (contains '@')."""
    return data.find('@') != -1
def transcript_dna_char(base_char: str) -> str:
    """Transcribe a single DNA base character into its mRNA counterpart.

    Returns the empty string when *base_char* is not one of the four DNA
    bases.

    :param base_char: single DNA base ('A', 'T', 'C' or 'G')
    :return: the matching mRNA base, or '' for unknown input
    """
    dna_to_rna = {'A': 'U', 'T': 'A', 'C': 'G', 'G': 'C'}
    return dna_to_rna.get(base_char, '')
def two_decimals(number: float):
    """Format *number* with two decimal places when it has a fractional
    part; otherwise format it as a plain integer string."""
    has_fraction = number % 1
    return "%.2f" % number if has_fraction else "%d" % number
def multiples_of_3_5(stop):
    """Sum all numbers in range(1, stop) divisible by 3 or 5.

    Uses the fact that divisibility by 3 or 5 repeats with period 15: the
    answer over full 15-number "frames" is computed in closed form, and only
    the trailing partial frame is summed explicitly.
    """
    full_frames = (stop - 1) // 15
    # Qualifying numbers within the first frame, 1..15.
    pattern = [n for n in range(1, 16) if n % 3 == 0 or n % 5 == 0]
    pattern_sum = sum(pattern)
    # Each subsequent frame's sum grows by 15 per qualifying element.
    per_frame_growth = 15 * len(pattern)
    # Closed form: sum over frames 0..full_frames-1 of
    # (pattern_sum + frame * per_frame_growth).
    total = full_frames * pattern_sum \
        + full_frames * (full_frames - 1) // 2 * per_frame_growth
    # Trailing partial frame, summed directly.
    for value in range(full_frames * 15 + 1, stop):
        if value % 3 == 0 or value % 5 == 0:
            total += value
    return total
def s_to_timestep(snapshot_years, snapshot_idx):
    """Convert a snapshot index position to a timestep.

    Args:
        snapshot_years (list): list of snapshot years.
        snapshot_idx (int): index of snapshot

    Returns:
        int: years elapsed between the first snapshot and the indexed one
    """
    base_year = snapshot_years[0]
    return snapshot_years[snapshot_idx] - base_year
def _CopyExcluding(er, s, t):
    """Return a copy of er, excluding all entries involving triangles s and t.

    Args:
        er: list of (weight, e, tl, tr) - see _ERGraph
        s: 3-tuple of int - a triangle
        t: 3-tuple of int - a triangle

    Returns:
        Copy of er without any entry whose tl or tr equals s or t.
    """
    excluded = (s, t)
    return [entry for entry in er
            if entry[2] not in excluded and entry[3] not in excluded]
def is_palindrome(values: list) -> bool:
    """Return True when the list reads the same forwards and backwards."""
    left = 0
    right = len(values) - 1
    while left < right:
        if values[left] != values[right]:
            return False
        left += 1
        right -= 1
    return True
def _get_version(version_info):
    """Convert a version-info tuple to a version string.

    Example:
        (0, 1, 2, 'beta', 3) returns '0.1.2-beta.3'
    """
    parts = [str(piece) for piece in version_info]
    version = '.'.join(parts[:3])
    if len(parts) > 3:
        version += '-' + '.'.join(parts[3:])
    return version
def digital_root(n):
    """Return the repeated digit sum of n (a single digit).

    Equivalent to n % 9, except that a nonzero n whose modulus is 0 yields 9
    (and digital_root(0) is 0).
    """
    if n == 0:
        return 0
    return n % 9 or 9
def format_allocation(nr_nodes, nr_timestamps, allocation_matrix, start_matrix):
    """Generate the formatted dict of the allocation of tasks.

    Args:
        nr_nodes (int): Number of fog nodes.
        nr_timestamps (int): Number of timestamps.
        allocation_matrix (matrix): rows = tasks, cols = nodes; each cell is
            the time allocated for that task on that node (0 = not placed).
        start_matrix (matrix): start time of each task, same layout.

    Returns:
        dict: per node, a dict mapping task index to a timeline list where
        each slot holds the (dot-padded) task id while the task runs and a
        filler string otherwise.
    """
    nr_tasks = len(allocation_matrix)
    width = len(str(nr_tasks))          # pad every task id to this width
    filler = "." * width
    output = {"node_" + str(node): {} for node in range(nr_nodes)}
    for task in range(len(allocation_matrix)):
        for node in range(len(allocation_matrix[0])):
            duration = allocation_matrix[task][node]
            if duration == 0:
                continue
            start = start_matrix[task][node]
            label = str(task).rjust(width, ".")
            output["node_" + str(node)][task] = [
                label if start <= ts < start + duration else filler
                for ts in range(nr_timestamps)
            ]
    return output
def retrieve_components(command):
    """Split the command string and return its arguments.

    'message'/'private' commands yield [recipient, body]; 'broadcast' yields
    the joined body string; anything else yields the remaining argument list
    (or the single argument itself when there is exactly one).
    """
    parts = command.strip(' ').split(' ')
    head = parts.pop(0)
    if head in ("message", "private"):
        return [parts[0], ' '.join(parts[1:])]
    if head == "broadcast":
        return ' '.join(parts)
    if len(parts) == 1:
        return parts[0]
    return parts
def find_anchor_s(min_id_ind,
                  selected_x,
                  selected_y,
                  selected_z):
    """Find the anchor S using the minimum ID of the selected anchors.

    The S anchor is the selected anchor with the minimum ID; its XYZ
    coordinates are returned as a list.
    """
    return [axis[min_id_ind] for axis in (selected_x, selected_y, selected_z)]
def _get_db_subnet_group_arn(region, current_aws_account_id, db_subnet_group_name):
    """Build an ARN for a DB subnet group from region, account and name.

    This avoids an extra AWS API call, because describe_db_instances does
    not return the DB subnet group ARN. The form is
    arn:aws:rds:{region}:{account-id}:subgrp:{subnet-group-name}
    per https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
    """
    arn_parts = ("arn:aws:rds", region, current_aws_account_id,
                 "subgrp", db_subnet_group_name)
    return ":".join(arn_parts)
def transpose(v, offset):
    """Transpose a vector-format MIDI sequence by *offset*.

    Values below 128 are copied through untouched (presumably non-note
    events — TODO confirm the encoding); values >= 128 are shifted and
    dropped when the result leaves the [128, 256) range.
    """
    result = []
    for value in v:
        if value < 128:
            result.append(value)
            continue
        shifted = value + offset
        if 128 <= shifted < 256:
            result.append(shifted)
    return result
def sample_from_config(opt):
    """Apply the distributions specified in the opt.json mapping.

    Dicts and lists are recursed into; plain scalars (str/float/int/bool)
    pass through unchanged; anything else is assumed to be a scipy-style
    distribution and sampled via .rvs().
    """
    sampled = {}
    for name, spec in opt.items():
        if isinstance(spec, dict):
            sampled[name] = sample_from_config(spec)
        elif isinstance(spec, list):
            sampled[name] = [sample_from_config(item) for item in spec]
        elif isinstance(spec, (str, float, int, bool)):
            sampled[name] = spec  # literal value, no sampling
        else:
            sampled[name] = spec.rvs()
    return sampled
def rowSum(mtx):
    """Return the element-wise sums across the nested lists.

    Note: for a matrix given as a list of rows this actually sums each
    COLUMN (position j summed over all sublists), matching the original
    behavior despite the function's name.

    Returns an error message string (not an exception) for irregular
    sublist lengths or non-summable element types, preserving the original
    contract.
    """
    try:
        # BUG FIX: the original used `assert` for input validation, which is
        # silently stripped under `python -O`; check explicitly instead.
        width = len(mtx[0])
        for row in mtx:
            if len(row) != width:
                return ('Length of lists is irregular or input format is wrong.')
        return [sum(column) for column in zip(*mtx)]
    except TypeError:
        return ('Undefined operand type')
def CheckHeader(inFile):
    """
    IN: Data (bytes) to search
    Out: (bool, int) - whether the file looks XOR encoded, and the offset of
    the NULL terminating the candidate key.

    XOR-encoded PlugX files typically carry the key in the first ~10 bytes
    followed by a NULL. If no NULL appears within 40 bytes we assume the
    file is not XOR encoded (or unknown).
    """
    # Guard against inputs shorter than the 40-byte scan window.
    for pos in range(min(40, len(inFile))):
        if inFile[pos] == 0x00:
            key = inFile[:pos]
            # BUG FIX: the original searched inFile[pos:] for itself, which
            # always returns 0, so the repeat-check could never fail. The
            # intent (per the comment) is to verify the candidate key bytes
            # repeat later in the file. TODO confirm against known samples.
            if not key or inFile[pos + 1:].find(key) == -1:
                return False, 0
            return True, pos
    return False, 0
def convert_strlist_intlist(str_labels):
    """Convert string labels to integer index labels.

    params: str_labels - string type list labels <list>
    returns:
        int_labels: int type list labels <list>
        search_index: unique labels in first-seen order <list>

    ```python
    >>> a = ["a", "b", "c", "c"]
    >>> int_labels, search_index = convert_strlist_intlist(a)
    >>> int_labels
    >>> [0, 1, 2, 2]
    >>> search_index
    >>> ["a", "b", "c"]
    ```
    """
    search_index = []
    # PERF: the original used list membership + list.index per label
    # (O(n^2) overall); a dict lookup keeps the same output in O(n).
    position_of = {}
    int_labels = []
    for label in str_labels:
        if label not in position_of:
            position_of[label] = len(search_index)
            search_index.append(label)
        int_labels.append(position_of[label])
    return int_labels, search_index
def edge_direction(point0, point1):
    """Return the 2D vector going from point0 to point1 as an (x, y) tuple."""
    delta_x = point1[0] - point0[0]
    delta_y = point1[1] - point0[1]
    return delta_x, delta_y
def toStr(pipeObj):
    """Create a comma-joined string representation of a pipeline object.

    Each element is a pair: when the first item is truthy it names a nested
    stage whose sub-pipeline (second item) is rendered in parentheses;
    otherwise the second item is emitted verbatim.
    """
    rendered = []
    for stage in pipeObj:
        if stage[0]:
            rendered.append(stage[0] + '(' + toStr(stage[1]) + ')')
        else:
            rendered.append(stage[1])
    return ','.join(rendered)
def get_time_slot(hour, minute):
    """Compute the time_slot id for a given hour and minute.

    Slots are the 15-minute intervals of the Sutka pool reservation table
    (https://www.sutka.eu/en/obsazenost-bazenu): slot 0 is 6:00-6:15 and
    slot 59 is 20:45-21:00.

    :param hour: hour of the day in 24 hour format
    :param minute: minute of the hour
    :return: time slot for line usage
    """
    quarter = int(minute / 15)
    return (hour - 6) * 4 + quarter
def _sigdigs(number, digits):
    """Round *number* to the given number of significant digits and format
    it as a comma-grouped currency string.

    Example: _sigdigs(1234.56, 3) returns '$1,230'.
    """
    rounded = float("{:.{}g}".format(number, digits))
    return "${:,.0f}".format(rounded)
def binary_search(data, target, low, high):
    """Return True if target is found in the indicated portion of the list.

    Only the portion from data[low] to data[high] inclusive is considered;
    data must be sorted ascending over that range.
    """
    while low <= high:
        mid = (low + high) // 2
        if data[mid] == target:
            return True
        if target < data[mid]:
            high = mid - 1  # continue in the left half
        else:
            low = mid + 1   # continue in the right half
    return False
def sensitivity_analysis_payload(
    variable_parameter_name="",
    variable_parameter_range="",
    variable_parameter_ref_val="",
    output_parameter_names=None,
):
    """Format the parameters for a sensitivity-analysis request as JSON-ready dict."""
    settings = {
        "variable_parameter_name": variable_parameter_name,
        "variable_parameter_range": variable_parameter_range,
        "variable_parameter_ref_val": variable_parameter_ref_val,
        # Avoid a mutable default argument: materialize the empty list here.
        "output_parameter_names": [] if output_parameter_names is None else output_parameter_names,
    }
    return {"sensitivity_analysis_settings": settings}
def hex_to_char(s):
    """Return the unicode character for a prefixed hex codepoint string.

    The first character of *s* (e.g. the 'U' in 'U1F600') is skipped; the
    remainder is parsed as hexadecimal.
    """
    codepoint = int(s[1:], base=16)
    return chr(codepoint)
def explode(screen):
    """Convert a screen-display string into a list of character lists,
    one list per '\\n'-separated row."""
    rows = screen.split('\n')
    return list(map(list, rows))
def getConfiguration(basePath, recursive=False):
    """Retrieve Tags from the Gateway as Python dictionaries (stub).

    These can be edited and saved back using system.tag.configure.

    Args:
        basePath (str): The starting point for Tag retrieval; may be a
            folder, in which case the Tags inside it are retrieved.
        recursive (bool): If True, retrieve the Tag tree one level under the
            base path (true recursion would require repeated calls at
            different paths). Optional.

    Returns:
        dict: A list of Tag dictionaries, nested Tags under a "tags" key.
        This stub only echoes its arguments and returns None.
    """
    print(basePath, recursive)
    return None
def add_probabilities(q_0, dfa, T, g_pos):
    """
    This code adds probabilities to each of the intermediate nodes.

    For every trace (tau, g) in T the DFA is run from the initial state q_0;
    each state visited at least once by the trace contributes to that
    state's counters. The returned mapping gives, per visited state, the
    fraction of visiting traces whose label g equals g_pos.

    q_0: initial DFA state.
    dfa: transition map keyed by (state, symbol) pairs.
    T: iterable of (tau, g) pairs - a symbol sequence and its label.
    g_pos: the label counted as "positive".
    """
    pos_prob = {}
    total = {}
    for tau,g in T:
        q = q_0
        # States visited by this trace (each counted once per trace).
        q_vis = set([q])
        if q not in pos_prob:
            pos_prob[q] = 0
        for sigma in tau:
            # Missing transitions simply leave q unchanged.
            if (q,sigma) in dfa:
                q = dfa[(q,sigma)]
                q_vis.add(q)
                if q not in pos_prob:
                    pos_prob[q] = 0
        # adding the visited states to the total count
        for q in q_vis:
            if g == g_pos:
                pos_prob[q] += 1
            if q not in total:
                total[q] = 0
            total[q] += 1
    # normalizing the probabilities
    # NOTE: states in pos_prob were always visited at least once, so
    # total[q] is nonzero here.
    for q in pos_prob:
        pos_prob[q] = float(pos_prob[q]) / total[q]
    return pos_prob
def last(path):
    """Return the last "part" of a slash-separated path.

    Examples:
        last('abc/def/ghi') => 'ghi'
        last('abc') => 'abc'
        last('') => ''
    """
    # rsplit with maxsplit=1 also covers the no-slash case: it returns the
    # whole string as the only element.
    return path.rsplit('/', 1)[-1]
def v2_tail(sequence, n):
    """Return the last n items of the given sequence as a list.

    Returns [] for n <= 0. (The explicit guard is required because
    sequence[-0:] would return the whole sequence; the original only
    guarded n == 0, so a negative n sliced from the FRONT instead —
    that defect is fixed here.)
    """
    if n <= 0:
        return []
    return list(sequence[-n:])
def make_name(estimator):
    """Return the name of the estimator, or the string itself when a string
    is given, or None when the estimator is None."""
    if estimator is None:
        return None
    if isinstance(estimator, str):
        return estimator
    return type(estimator).__name__
def select_case(select_key, next_state):
    """Return one select-case line for a parser.

    :param select_key: the action to read registers
    :type select_key: str
    :param next_state: the next state associated with the select key
    :type next_state: str
    :returns: str -- the code in plain text
    :raises: None
    """
    # Key is left-aligned in an 8-character field, followed by the state.
    return f'\t{select_key: <8}: {next_state};\n'
def create_numeric_classes(lithologies):
    """Create a dictionary mapping each lithology name to its numeric code
    (its position in the input sequence).

    Args:
        lithologies (iterable of str): Name of the lithologies
    """
    return {name: code for code, name in enumerate(lithologies)}
def as_float(prod):
    """Convert json values to float and store their sum under 'total'.

    Dicts containing a 'yuk' key are left unconverted (only 'total' is
    added). The input dict is mutated in place and returned.
    """
    prod['total'] = 0.0
    if isinstance(prod, dict) and 'yuk' not in prod:
        running_total = 0.0
        for prod_type, prod_val in prod.items():
            if prod_type == 'total':
                # BUG FIX: the original iterated over 'total' as well, adding
                # the partial sum into itself (order-dependent double count).
                continue
            prod[prod_type] = float(prod_val)
            running_total += prod[prod_type]
        prod['total'] = running_total
    return prod
def _gen_key_value(size: int) -> bytes:
    """Return a fixed key_value of the given size: bytes 0, 1, ..., size-1."""
    return bytes(range(size))
def email_bodies(emails):
    """Return a flat list of sub-parts from a list of email objects.

    For each message, walk() is called and the first yielded item (the
    message object itself) is skipped, so only nested parts are collected.
    """
    return [part
            for eml in emails
            for part in list(eml.walk())[1:]]
def _to_pydim(key: str) -> str:
    """Convert an ImageJ dimension name to the Python/NumPy convention.

    Unrecognized names are returned unchanged.
    """
    pydims = {
        "Time": "t",
        "slice": "pln",
        "Z": "pln",
        "Y": "row",
        "X": "col",
        "Channel": "ch",
    }
    return pydims.get(key, key)
def create_geocoding_api_request_str(street, city, state,
                                     benchmark='Public_AR_Census2010',
                                     vintage='Census2010_Census2010',
                                     layers='14',
                                     format='json') -> str:
    """Create the US Census geocoding API request string.

    Args:
        street (str): street address
        city (str): city
        state (str): state as 2 digit initial
        benchmark (str, optional): Defaults to 'Public_AR_Census2010'.
        vintage (str, optional): Defaults to 'Census2010_Census2010'.
        layers (str, optional): Defaults to '14'.
        format (str, optional): Defaults to 'json'.

    Returns:
        str: geocoding api request string.
    """
    endpoint = 'https://geocoding.geo.census.gov/geocoder/geographies/address'
    return (f'{endpoint}?street={street}&city={city}&state={state}'
            f'&benchmark={benchmark}&vintage={vintage}&layers={layers}'
            f'&format={format}')
def ignore_headers(headers_to_sign):
    """Remove headers that must not participate in request signing.

    Mutates *headers_to_sign* in place and returns it. Rationale (excerpts
    from @lsegal,
    https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258):

    - User-Agent: signing it breaks pre-signed URLs executed by other
      agents, and proxies may rewrite it.
    - Content-Length: a pre-signed URL should not constrain the payload
      length (notably S3 pre-signed PUT); regular requests already sign a
      body checksum, which implicitly validates length.
    - Content-Type: browsers modify and normalize this header in many ways
      (see https://github.com/aws/aws-sdk-js/issues/244); skipping it
      simplifies logic and reduces the possibility of bugs.
    - Authorization: skipped for obvious reasons.
    """
    for header in ('Authorization', 'Content-Length',
                   'Content-Type', 'User-Agent'):
        headers_to_sign.pop(header, None)
    return headers_to_sign
def num_range(n):
    """Generate a range of numbers around the target number `n` for use in a
    multiple-choice question (32 below through 31 above)."""
    half_width = 32
    return range(n - half_width, n + half_width)
def remove_right_side(argument: str) -> str:
    """Strip a default-argument assignment, keeping only the name before '='."""
    name, _separator, _default = argument.partition("=")
    return name
def clean(dictionary: dict):
    """Remove inconsistencies such as word_tag1-tag2.

    Each such entry is replaced by word_tag1 and word_tag2, splitting the
    count evenly between the tag parts (merging into existing entries).
    """
    result = dictionary.copy()
    for key, count in dictionary.items():
        word, tag = key.split('_', 1)
        tag_parts = tag.split('-')
        if len(tag_parts) <= 1:
            continue  # already a single-tag entry
        share = count / len(tag_parts)
        del result[key]
        for tag_part in tag_parts:
            word_tag = word + '_' + tag_part
            result[word_tag] = result.get(word_tag, 0) + share
    return result
def to_xml_elem(key, val):
    """Return an xml element of type key containing val, or a self-closed
    element when val is the empty string.

    :param key: The xml tag of the element to generate. This must be a valid
        xml tag name because it is not escaped.
    :param val: The value of the element to generate.
    :return: An xml element string.
    """
    return f"<{key}>{val}</{key}>" if val != "" else f"<{key}/>"
def _star_passthrough(args):
    """Call args[0](*args[1], **args[2]) — lets a zipped iterable be handed
    to a worker function as a single argument."""
    func = args[0]
    positional = args[1]
    keyword = args[2]
    return func(*positional, **keyword)
def set_default_hyperparameters(path, taskname):
    """
    Retrieve the default hyperparameters to be set for a primitive.

    Parameters
    ----------
    path: str
        Python path of a D3M primitive.
    taskname: str
        Task name; a few primitives ('adjacency_spectral_embedding.JHU',
        'splitter') adjust their defaults based on it.

    Returns
    -------
    dict or None
        Hyperparameter overrides for the primitive, or None when the
        primitive has no special defaults.

    Note: conditions are checked in order and several use substring
    matching, so a later match overwrites an earlier one.
    """
    hyperparams = None
    # Set hyperparameters for specific primitives
    if path == 'd3m.primitives.data_cleaning.imputer.SKlearn':
        hyperparams = {}
        hyperparams['use_semantic_types'] = True
        hyperparams['return_result'] = 'replace'
        hyperparams['strategy'] = 'median'
        hyperparams['error_on_no_input'] = False
    if path == 'd3m.primitives.data_transformation.one_hot_encoder.SKlearn':
        hyperparams = {}
        hyperparams['use_semantic_types'] = True
        hyperparams['return_result'] = 'replace'
        hyperparams['handle_unknown'] = 'ignore'
    if path == 'd3m.primitives.data_preprocessing.robust_scaler.SKlearn':
        hyperparams = {}
        hyperparams['return_result'] = 'replace'
    if path == 'd3m.primitives.feature_construction.corex_text.DSBOX':
        hyperparams = {}
        hyperparams['threshold'] = 500
    if 'conditioner' in path:
        hyperparams = {}
        hyperparams['ensure_numeric'] = True
        hyperparams['maximum_expansion'] = 30
    if path == 'd3m.primitives.time_series_classification.k_neighbors.Kanine':
        hyperparams = {}
        hyperparams['n_neighbors'] = 1
    if path == 'd3m.primitives.clustering.k_means.Fastlvm':
        hyperparams = {}
        hyperparams['k'] = 100
    if 'adjacency_spectral_embedding.JHU' in path:
        hyperparams = {}
        hyperparams['max_dimension'] = 5
        hyperparams['use_attributes'] = True
        # Link-prediction tasks use a smaller embedding without attributes.
        if 'LINK' in taskname:
            hyperparams['max_dimension'] = 2
            hyperparams['use_attributes'] = False
            hyperparams['which_elbow'] = 1
    if 'splitter' in path:
        hyperparams = {}
        # Image/audio datasets get a much smaller row threshold.
        if taskname == 'IMAGE' or taskname == 'IMAGE2' or taskname == 'AUDIO':
            hyperparams['threshold_row_length'] = 1200
        else:
            hyperparams['threshold_row_length'] = 50000
    if path == 'd3m.primitives.link_prediction.link_prediction.DistilLinkPrediction':
        hyperparams = {}
        hyperparams['metric'] = 'accuracy'
    if path == 'd3m.primitives.vertex_nomination.vertex_nomination.DistilVertexNomination':
        hyperparams = {}
        hyperparams['metric'] = 'accuracy'
    if path == 'd3m.primitives.graph_matching.seeded_graph_matching.DistilSeededGraphMatcher':
        hyperparams = {}
        hyperparams['metric'] = 'accuracy'
    if 'PCA' in path:
        hyperparams = {}
        hyperparams['n_components'] = 10
    if path == 'd3m.primitives.data_preprocessing.image_reader.Common':
        hyperparams = {}
        hyperparams['use_columns'] = [0,1]
        hyperparams['return_result'] = 'replace'
    if path == 'd3m.primitives.data_preprocessing.text_reader.Common':
        hyperparams = {}
        hyperparams['return_result'] = 'replace'
    if path == 'd3m.primitives.schema_discovery.profiler.Common':
        hyperparams = {}
        hyperparams['categorical_max_absolute_distinct_values'] = None
    if path == 'd3m.primitives.time_series_forecasting.arima.DSBOX':
        hyperparams = {}
        hyperparams['take_log'] = False
    if path == 'd3m.primitives.time_series_forecasting.lstm.DeepAR':
        hyperparams = {}
        hyperparams['epochs'] = 3
    if path == 'd3m.primitives.graph_clustering.gaussian_clustering.JHU':
        hyperparams = {}
        hyperparams['max_clusters'] = 10
    if path == 'd3m.primitives.time_series_forecasting.esrnn.RNN':
        hyperparams = {}
        hyperparams['auto_tune'] = True
        hyperparams['output_size'] = 60
    return hyperparams
def parse_weight(weight):
    """Validate node weight and format to float.

    Args:
        weight (str,int,float): node weight.
    Returns:
        float: Correctly formatted node weight.
    Raises:
        ValueError: Invalid node weight.
    """
    try:
        return float(weight)
    except (TypeError, ValueError):
        # BUG FIX: float() raises ValueError (not TypeError) for malformed
        # strings such as "abc"; the original only caught TypeError, so the
        # raw ValueError escaped with float()'s own message.
        raise ValueError("Invalid Weight '%s'" % weight)
def remove_file_from_tree(tree, file_path):
    """Remove a file from a tree.

    Args:
        tree: A list of dicts containing info about each blob in a tree.
        file_path: The path of a file to remove from the tree.

    Returns:
        The provided tree (mutated in place) with the first item matching
        the specified file_path removed, if any.
    """
    found = next((entry for entry in tree
                  if entry.get("path") == file_path), None)
    if found:
        tree.remove(found)
    return tree
def split_one(value, key):
    """Split *value* on the first occurrence of *key* and return the
    resulting list (one or two elements)."""
    pieces = value.split(key, 1)
    return pieces
def list_same(list_a, list_b):
    """Return the items from list_b that also appear in list_a, preserving
    list_b's order."""
    # Membership test stays on the list (not a set) so unhashable items work.
    return [item for item in list_b if item in list_a]
def get_actions(op):
    """Return the operation's array of actions (empty list when absent).

    BUG FIX: the original omitted the default on .get('pipeline'), so an
    op whose metadata lacked a 'pipeline' key raised AttributeError
    (None.get) instead of returning [].
    """
    return op.get('metadata', {}).get('pipeline', {}).get('actions', [])
def GetMeta(result: dict, meta: str = '') -> list:
    """Extract selected metadata fields from an AliEn response.

    *meta* is a whitespace-separated selection of cwd/user/error/exitcode
    (or 'all'); matching fields are returned in a fixed order.
    """
    output = []
    if not result:
        return output
    # Only AliEn responses carry a 'metadata' section.
    if not (isinstance(result, dict) and 'metadata' in result):
        return output
    selected = meta.split() if meta else []
    want_all = 'all' in selected
    field_map = (('cwd', 'currentdir'),
                 ('user', 'user'),
                 ('error', 'error'),
                 ('exitcode', 'exitcode'))
    for option, field in field_map:
        if want_all or option in selected:
            output.append(result["metadata"][field])
    return output
def sami_params(php, os):
    """Return the configuration parameters for sami with their default
    values. (The php/os arguments are accepted for interface compatibility
    but unused.)"""
    return dict(KRW_CODE='/code',
                SAMI_CONFIG='/etc/sami/sami.conf.php',
                SAMI_PROJECT_TITLE='API Documentation',
                SAMI_SOURCE_DIR='src',
                SAMI_BUILD_DIR='docs/build/html/api',
                SAMI_CACHE_DIR='docs/cache/html/api',
                SAMI_FLAGS='-v --force',
                SAMI_SERVER_PORT=8001,
                SAMI_SOURCE_REGEX=r'\.\(php\|txt\|rst\)$',
                SAMI_THEME='default')
def __is_int(s):
    """Return whether the string textually represents an integer."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def _decode_edge_attributes(color):
    """Decode edge attributes from an integer value.

    scheme:
        bond order <=> tens place
        parity <=> ones place (None->0, False->1, True->2)
    """
    order, parity_code = divmod(color, 10)
    assert parity_code in (0, 1, 2)
    par = None if parity_code == 0 else bool(parity_code - 1)
    return order, par
def find_min(L):
    """find_min uses a loop to return the minimum of L.

    Argument L: a nonempty list of numbers.
    Return value: the smallest value in L.
    """
    smallest = L[0]
    for candidate in L[1:]:
        if candidate < smallest:
            smallest = candidate
    return smallest
def get_swaps(path):
    """Build the swap gates that move the qbit at the end of *path* to its
    start.

    Swapping along consecutive qbits shifts every qbit on the path by one;
    to undo that shift for all but the moved qbit, the same swaps (except
    the last) are replayed in reverse order.
    """
    forward = [('S', [a, b], []) for a, b in zip(path, path[1:])]
    # Replay all but the final swap in reverse to fix the one-position shift.
    return forward + forward[-2::-1]
def factors_of_a_number(num: int) -> list:
    """Return all positive factors of num in ascending order ([] for num < 1).

    >>> factors_of_a_number(1)
    [1]
    >>> factors_of_a_number(5)
    [1, 5]
    >>> factors_of_a_number(24)
    [1, 2, 3, 4, 6, 8, 12, 24]
    >>> factors_of_a_number(-24)
    []
    """
    from math import isqrt  # local import: this file has no shared import block
    if num < 1:
        return []
    # PERF: trial division only up to sqrt(num); each small divisor d pairs
    # with num // d, so the full O(n) scan of the original is unnecessary.
    small, large = [], []
    for divisor in range(1, isqrt(num) + 1):
        if num % divisor == 0:
            small.append(divisor)
            partner = num // divisor
            if partner != divisor:
                large.append(partner)
    return small + large[::-1]
def clean_date(date):
    """
    Perform date format cleanup.

    Keeps the first 7 characters verbatim, title-cases the remainder,
    strips leading zeros at word starts, expands month abbreviations, and
    collapses repeated spaces.
    """
    months = {'Jan': 'January',
              'Feb': 'February',
              'Mar': 'March',
              'Apr': 'April',
              'Mai': 'May',
              'May': 'May',
              'Jun': 'June',
              'Jul': 'July',
              'Aug': 'August',
              'Juli': 'July',
              'Sep': 'September',
              'Oct': 'October',
              'Nov': 'November',
              'Dec': 'December',
              'Jane': 'January'}
    index = 0
    result = date[:7]
    data = date[7:].title().strip()
    letter = word = False
    while index < len(data):
        # Drop '0' characters at the start of a word (leading zeros).
        if data[index] == '0' and not word:
            index = index + 1
            continue
        if data[index] == ' ':
            word = False
        else:
            word = True
        # NOTE(review): `isalpha` is missing its call parentheses, so this
        # tests the truthiness of the bound method (always True). As a
        # result `letter` is set on every non-space character and the
        # elif branch below is never taken — confirm whether
        # `data[index].isalpha()` / `data[index].isdigit()` were intended.
        if data[index].isalpha:
            letter = True
        elif data[index].isdigit and letter:
            result = result + ' '
            word = False
        result = result + data[index]
        index = index + 1
    for month in months:
        if ' {0} '.format(month) in result:
            result = result.replace(month, months[month])
    while '  ' in result:
        result = result.replace('  ', ' ')
    return '{0}\n'.format(result)
def _make_divisible(v, divisor, width=1, min_value=None):
    """Round *v* to the nearest multiple of *divisor*, clamped below by
    *min_value* (defaults to *divisor*).

    Taken from the original tf repo; ensures all layers have a channel
    number divisible by 8:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    (The *width* argument is unused but kept for interface compatibility.)
    """
    if min_value is None:
        min_value = divisor
    rounded = int(v + divisor / 2) // divisor * divisor
    new_v = max(min_value, rounded)
    # Make sure the round-down does not drop below 90% of the original value.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
def getFirst(text, signs):
    """
    Returns the position of the sign which occurs first in the text.

    @param {string} text.
    @param {Array.<string>} signs.
    @return {number} Position, or -1 when none of the signs occur.
    """
    # PERF: the original called text.find(sign) twice per sign; compute it
    # once per sign instead.
    positions = []
    for sign in signs:
        index = text.find(sign)
        if index != -1:
            positions.append(index)
    return min(positions) if positions else -1
def label_name(condition):
    """Format a condition identifier as a human-readable label: underscores
    become spaces, dots split words, first word is capitalized."""
    words = condition.replace("_", " ").split(".")
    return " ".join([words[0].capitalize()] + words[1:])
def purity(first_clustering, second_clustering):
    """Return the purity of the given two clusterings.

    :param first_clustering: a collection of sets to compare
    :param second_clustering: a collection of iterables to compare
    :return: the purity index as a double (0 when there are no data points)
    """
    total_points = sum(len(cluster) for cluster in first_clustering)
    if total_points == 0:
        return 0
    overlap = 0
    for cluster in first_clustering:
        # Best agreement of this cluster with any cluster of the second
        # clustering (0 when the second clustering is empty).
        overlap += max(
            (len(cluster.intersection(other)) for other in second_clustering),
            default=0)
    return overlap / total_points
def export(number):
    """Export a Python float as a Java long literal string (bit-for-bit
    reinterpretation of the IEEE-754 double) to avoid loss of precision."""
    from struct import pack, unpack
    as_long = unpack("<q", pack("<d", number))[0]
    return "{}L".format(as_long)
def get_wait_time(authenticated):
    """Get the wait time between E-Utils API calls, in seconds.

    Parameters
    ----------
    authenticated : bool
        Whether EUtils API use is authenticated.

    Returns
    -------
    float
        Wait time to use between API calls, in seconds.

    Notes
    -----
    The E-Utils API allows 10 requests/second for authenticated users
    (using an API key) and 3 requests/second otherwise. More on rate
    limits: https://www.ncbi.nlm.nih.gov/books/NBK25497/
    """
    requests_per_second = 10 if authenticated else 3
    return 1 / requests_per_second
def distance(a, b):
    """Manhattan distance between two 2-D points given as (x, y) pairs."""
    ax, ay = a
    bx, by = b
    dx = ax - bx
    dy = ay - by
    return abs(dx) + abs(dy)
def flatten_list(lst):
    """
    Flatten a list of lists (one level deep) into a single list.
    lst: list of lists
    returns: flattened list
    """
    flattened = []
    for sublist in lst:
        flattened.extend(sublist)
    return flattened
def __one_forward_open(x, y, c, l):
    """convert coordinates to zero-based, both strand, open/closed coordinates.

    Parameters are from, to, is_positive_strand, length of contig.
    """
    # Shift the one-based endpoints down to zero-based.
    x -= 1
    y -= 1
    # For the negative strand, mirror the interval onto the forward strand.
    # NOTE(review): the mirror uses the already-decremented endpoints,
    # giving (l - y, l - x) — assumed to produce the intended open/closed
    # convention; confirm against callers before changing.
    if not c: x, y = l - y, l - x
    return x, y
def number_of_bits_needed_to_communicates_no_compressed(nb_devices:int, d: int) -> int:
    """Theoretical number of bits used for a single way WITHOUT compression:
    each of the nb_devices devices sends d full 32-bit values.

    NOTE(review): the original docstring claimed "when using compression
    (with Elias encoding)", which contradicts both the function name and the
    flat 32-bits-per-coordinate formula below — confirm against the paper.
    """
    return nb_devices * d * 32
def IsAioNode(tag):
    """Returns True iff tag represents an AIO node."""
    prefix = 'aio_nodes.'
    return tag[:len(prefix)] == prefix
def addBroadcastBits( iAdr, bitCount ):
    """
    iAdr is 32 bit integer
    bitCount is integer (prefix length).
    Sets all (32 - bitCount) host bits, yielding the broadcast address.
    """
    host_bits = 32 - bitCount
    if host_bits <= 0:
        # Prefix covers the whole address (or more): nothing to set.
        return iAdr
    # One mask OR instead of setting each bit in a loop.
    return iAdr | ((1 << host_bits) - 1)
def _rpm_long_size_hack(hdr, size):
""" Rpm returns None, for certain sizes. And has a "longsize" for the real
values. """
return hdr[size] or hdr['long' + size] |
def delete_cancelled_events(events):
    """
    Drop every cancelled event from the given list.
    :param events: all elements of `DayStudyEvents`
    :type events: list
    :return: list of available events
    :rtype: list
    """
    return list(filter(lambda event: not event["IsCancelled"], events))
def surf_vol(length, girth):
    """Calculate the surface volume of an animal from its length and girth

    Args
    ----
    length: float or ndarray
        Length of animal (m)
    girth: float or ndarray
        Girth of animal (m)

    Returns
    -------
    surf:
        Surface area of animal (m^2)
    vol: float or ndarray
        Volume of animal (m^3)
    """
    import numpy

    # NOTE(review): the 0.01 factors below convert cm -> m, which contradicts
    # the docstring's claim that inputs are already in metres — confirm units.
    # Semi-minor axis: girth is treated as a circumference.
    a_r = 0.01 * girth / (2 * numpy.pi)
    stl_l = 0.01 * length
    # Semi-major axis of the prolate spheroid: half the body length.
    c_r = stl_l / 2
    # Eccentricity of the spheroid.
    e = numpy.sqrt(1 - (a_r ** 2 / c_r ** 2))
    # NOTE(review): the standard prolate-spheroid surface formula is
    # 2*pi*a^2 + (2*pi*a*c/e) * arcsin(e); this code multiplies by
    # 1/sin(e) instead — verify against the source publication.
    surf = (2 * numpy.pi * a_r ** 2) + (2 * numpy.pi * ((a_r * c_r) / e)) * 1 / (
        numpy.sin(e)
    )
    # Volume of a prolate spheroid: (4/3)*pi*a^2*c.
    vol = ((4 / 3) * numpy.pi) * (a_r ** 2) * c_r
    return surf, vol
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10 ** e
else:
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10 ** -e |
def true_dict(data_dict):
    """
    Recursively convert "true"/"false" strings (any case) in a dict to
    booleans. Mutates data_dict in place and returns it.
    """
    for key in data_dict:
        value = data_dict[key]
        lowered = str(value).lower()
        if lowered == "true":
            data_dict[key] = True
        elif lowered == "false":
            data_dict[key] = False
        elif type(value) is dict:
            # Exact dict type check (not isinstance) preserved deliberately.
            data_dict[key] = true_dict(value)
    return data_dict
def prob_from_crowd_label_cheat(l, uncer):
    """
    Return (p0, p1): hard-coded probabilities of the true label, given the
    crowd label l (0 or 1) and the vote counts uncer = (votes_for_0, votes_for_1).

    The previous `agree` ratio computed here was never used — and divided by
    zero when both vote counts were 0 — so it has been removed.
    """
    if l == 0:
        if uncer[0] == 5:
            return (0.99, 0.01)
        if uncer[0] == 4:
            return (0.95, 0.05)
        return (0.87, 0.13)
    if uncer[1] == 5:
        return (0.2, 0.8)
    if uncer[1] == 4:
        return (0.36, 0.64)
    return (0.67, 0.33)
def _isint(x):
"""returns True if x is an int, False otherwise"""
try:
int(x)
return True
except:
return False |
def _get(elements, index):
"""Return element at the given index or None."""
return None if index is None else elements[index] |
def insertion_sort(nums):
    """Insertion Sort: sorts nums in place; returns None."""
    if not nums:
        return None
    for i in range(1, len(nums)):
        current = nums[i]
        pos = i
        # Shift every larger element one slot to the right, then drop
        # the current value into the gap.
        while pos > 0 and nums[pos - 1] > current:
            nums[pos] = nums[pos - 1]
            pos -= 1
        nums[pos] = current
    return
def string_bits(str_):
    """Return every other character of the input, starting with the first.

    :return: the string with the characters at 1-based even positions removed.
    """
    return ''.join(ch for i, ch in enumerate(str_) if i % 2 == 0)
def split_uri(uri, mod_attr_sep='::'):
    """Split given URI into a tuple of (protocol, module URI, attribute chain).

    @param mod_attr_sep: the separator between module name and attribute name.
    """
    # partition() splits at the first separator, like split(sep, 1).
    mod_uri, sep, attr_chain = uri.partition(mod_attr_sep)
    if not sep:
        attr_chain = None
    if mod_uri.startswith('py://'):
        protocol = 'py'
        mod_uri = mod_uri[len('py://'):]
    elif mod_uri.startswith('file://'):
        protocol = 'file'
        mod_uri = mod_uri[len('file://'):]
    elif mod_uri.endswith('.py'):
        # No scheme prefix, but a `.py` suffix: treat the uri as a module
        # file path instead of a module name.
        protocol = 'file'
    else:
        protocol = 'py'
    return (protocol, mod_uri, attr_chain)
def parse_float_string(obj: str) -> float:
    """Parse a string as `float`.

    Args:
        obj : the string to parse

    Raises:
        ValueError: if obj is not a string, or does not represent a float.
    """
    if not isinstance(obj, str):
        # Previously raised ValueError() with no message; keep the type
        # (callers catch ValueError) but say what went wrong.
        raise ValueError(f"expected a string, got {type(obj).__name__}")
    return float(obj)
def get_nmods(mod_seq, mod_str="Oxidation"):
    """
    Count how many times a modification appears in a modified sequence.

    Args:
        mod_seq: str, modified sequence string
        mod_str: str, modification string to count

    Returns:
        int, number of non-overlapping occurrences of mod_str in mod_seq
    """
    occurrences = mod_seq.count(mod_str)
    return occurrences
def numeric(s):
    """
    Best-effort numeric conversion: int if possible, else float,
    else the value unchanged.
    :param s: value to convert
    :return: int, float, or the original value
    """
    for converter in (int, float):
        try:
            return converter(s)
        except ValueError:
            continue
    return s
def _format2_out_to_abstract(format2_step, run=None):
"""Convert Format2 'out' list for step into CWL abstract 'out' list."""
cwl_out = []
if "out" in format2_step:
out = format2_step.get("out")
if isinstance(out, dict):
for out_name in out.keys():
# discard PJA info when converting to abstract CWL
cwl_out.append(out_name)
else:
cwl_out = out
return cwl_out |
def to_int(text):
    """
    Extract the digit characters from text.
    Note: despite the name, the result is a str of digits (possibly empty).
    """
    return ''.join(filter(str.isdigit, text))
def cleaned_up(review):
    """
    Normalize a review: strip the Beer Buddy suffix, collapse whitespace,
    append a trailing blank line, and blank out reviews under five words.

    :param review: Review to clean up.
    :return: Cleaned up review ('' if too short).
    """
    marker = '---Rated via Beer Buddy for iPhone'
    if review.endswith(marker):
        review = review[:-len(marker)]
    # Collapse runs of whitespace into single spaces.
    words = review.split()
    if len(words) < 5:
        # Too short to keep.
        return ''
    return ' '.join(words) + '\n\n'
def nested_sum(ints):
    """Return the sum of an arbitrarily nested list of integers.

    ints: an int, or a (possibly nested) list of ints.
    """
    # Recursive case: a list sums to the nested sums of its elements,
    # e.g. nested_sum([[1, 2], [3]]) = nested_sum([1, 2]) + nested_sum([3]).
    if isinstance(ints, list):
        return sum(nested_sum(item) for item in ints)
    # Base case: a bare value contributes itself,
    # e.g. nested_sum(3) = 3.
    return ints
def merge(ll, rl):
    """Merge two sorted lists into one sorted list.

    Uses index cursors instead of ``list.pop(0)`` — pop(0) is O(n) per call
    (quadratic overall) and destructively emptied the inputs; the inputs are
    no longer mutated.

    :param ll: List one (left list), assumed sorted
    :param rl: List two (right list), assumed sorted
    :returns: Sorted, merged list
    """
    res = []
    i = j = 0
    while i < len(ll) and j < len(rl):
        # Strict '<' preserves the original tie-breaking: on equal keys the
        # right-hand element is taken first.
        if ll[i] < rl[j]:
            res.append(ll[i])
            i += 1
        else:
            res.append(rl[j])
            j += 1
    # Append whatever remains of either list.
    res.extend(ll[i:])
    res.extend(rl[j:])
    return res
def mapSeries(requestContext, seriesList, mapNode):
    """
    Short form: ``map()``

    Takes a seriesList and maps it to a list of seriesList. Each seriesList has the
    given mapNode in common.

    .. note:: This function is not very useful alone. It should be used with :py:func:`reduceSeries`

    .. code-block:: none

        mapSeries(servers.*.cpu.*,1) =>
          [
            servers.server1.cpu.*,
            servers.server2.cpu.*,
            ...
            servers.serverN.cpu.*
          ]
    """
    groups = {}
    order = []
    for series in seriesList:
        # Group by the dotted-name component at position mapNode.
        group_key = series.name.split(".")[mapNode]
        if group_key not in groups:
            groups[group_key] = []
            order.append(group_key)
        groups[group_key].append(series)
    # Preserve first-seen key order in the output.
    return [groups[key] for key in order]
def get_volume_reduction_method(config_file):
    """
    Checks in the configuration file if a volume reduction method has been set.

    Parameters
    ----------
    config_file: dict
        The configuration file used in the parent code.

    Returns
    -------
    algorithm: str or None
        Volume reduction algorithm name, or None when not configured.
    """
    # The previous if/else returned the stored value in both branches
    # (None -> None, value -> value); a direct lookup is equivalent.
    return config_file['volume_reducer']['algorithm']
def statsboardDataConsistent(statsboardData, hostsOnStage):
    """
    Validate the consistency of statsboard data fetched for use by
    service add-ons.

    Returns False when the data is empty, any slice carries an error, more
    than one host type appears, or hosts not on the stage show up.

    :param statsboardData: list of data slices, each carrying a "tags" dict
    :param hostsOnStage: hosts expected on the current stage
    :return: True when the data is consistent, else False
    """
    if not statsboardData:
        return False
    host_types = set()
    hosts_found = set()
    for entry in statsboardData:
        tags = entry["tags"]
        if "host" in tags:
            hosts_found.add(tags["host"])
        if "host_type" in tags:
            host_types.add(tags["host_type"])
        # Any error in a slice invalidates the whole result.
        if "error" in entry:
            return False
    # Only one host type is allowed per stage.
    if len(host_types) > 1:
        return False
    # No more hosts than the stage actually has...
    if len(hosts_found) > len(hostsOnStage):
        return False
    # ...and every reported host must belong to the stage.
    return all(host in hostsOnStage for host in hosts_found)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.