content stringlengths 42 6.51k |
|---|
def _gen_perm_Numpy(order, mode):
"""
Generate the specified permutation by the given mode.
Parameters
----------
order : int
the length of permutation
mode : int
the mode of specific permutation
Returns
-------
list
the axis order, according to Kolda's unfold
"""
tmp = list(range(order - 1, -1, -1))
tmp.remove(mode)
perm = [mode] + tmp
return perm |
def parse_parent(docname):
    """Given a docname path, pick apart and return name of parent."""
    parts = docname.split('/')
    depth = len(parts)
    if docname == 'index':
        # Top of the Sphinx project has no parent.
        return None
    if depth == 1:
        # Non-index doc in root, e.g. "about".
        return 'index'
    if depth == 2:
        # "blog/index" -> root index; "blog/about" -> "blog/index".
        return 'index' if parts[-1] == 'index' else parts[0] + '/index'
    if parts[-1] == 'index':
        # "blog/sub/index" -> "blog/index".
        return '/'.join(parts[:-2]) + '/index'
    # "blog/sub/about" -> "blog/sub/index".
    return '/'.join(parts[:-1]) + '/index'
def all_byte_labels_are_defined(byte_labels):
    """
    Return True when every label has already been defined.

    Args:
        byte_labels: mapping of label name -> dict with a boolean
            "definition" entry.
    """
    # all() short-circuits on the first undefined label instead of
    # materialising the whole list as `False not in [...]` did.
    return all(label["definition"] for label in byte_labels.values())
def _split_privs(privspec):
"""Split the aclitem into three parts
:param privspec: privilege specification (aclitem)
:return: tuple with grantee, privilege codes and granto
"""
(usr, prvgrant) = privspec.split('=')
if usr == '':
usr = 'PUBLIC'
(privcodes, grantor) = prvgrant.split('/')
return (usr, privcodes, grantor) |
def cut_signal_apply(cols):
    """Determine whether the tool is in cut from the PMC signal.

    The PMC signal is saved as a base-10 number whose 7-digit binary
    form carries an "in cut" flag in the leading bit (worth 64) and the
    tool number in the remaining bits.  E.g. tool 6 in cut is 1000110,
    i.e. 70 in base 10.  Working in base 10: subtract 64 -- a positive
    remainder means "in cut" (tool number = pmc - 64); otherwise the
    tool is out of cut (tool number = pmc).

    Returns 1 when in cut, 0 otherwise.  Only cols[0] is consulted.
    """
    pmc_value = cols[0]
    return 1 if (pmc_value - 64) > 0 else 0
def validate_int(number):
    """Validate a user-supplied value as a positive integer before it is
    added to the database.

    Returns True when ``number`` parses as an int greater than zero,
    False otherwise.
    """
    try:
        value = int(number)  # parse once instead of twice as before
    except ValueError:
        return False
    return value > 0
def hash_filter(first_hits, second_hits):
    """
    Filter the second network using dictionary lookups.

    Keeps only interactions whose second protein appears in the first
    network.

    :param first_hits: Network from first query (keyed by protein)
    :param second_hits: Network from second query, keyed by
        "p1<TAB>p2" interaction strings
    :return: filtered network
    """
    return {
        interaction: row
        for interaction, row in second_hits.items()
        if interaction.split("\t")[1] in first_hits
    }
def triangle_number(n):
    """
    Return the nth triangle number; i.e., the value of
    ``1+2+3+...+(n-1)+n``.

    Uses integer floor division: n*(n+1) is always even, so ``//`` is
    exact and returns an int (the original ``/`` returned a float).
    """
    return n * (n + 1) // 2
def extract_cols_from_data_type(data_type, column_definition,
                                excluded_input_types):
    """Extracts the names of columns that correspond to a defined data_type.

    Args:
      data_type: DataType of columns to extract.
      column_definition: Column definition to use; each entry is indexed
        as (name, data_type, input_type).
      excluded_input_types: Set of input types to exclude.

    Returns:
      List of names for columns with data type specified.
    """
    matches = []
    for column in column_definition:
        if column[1] == data_type and column[2] not in excluded_input_types:
            matches.append(column[0])
    return matches
def get_aws_account_id_from_arn(lambda_arn):
    """
    Retrieve the AWS account id from an ARN.

    The account id is the fifth colon-separated field of an ARN.

    :param lambda_arn: arn of a calling lambda
    :type lambda_arn: string
    :returns: aws account id
    :rtype: string
    """
    fields = lambda_arn.split(':')
    return fields[4]
def inGigabytes(nbytes):
    """Convert a byte count to (binary, 1024**3) gigabytes."""
    bytes_per_gigabyte = 1024. ** 3
    return nbytes / bytes_per_gigabyte
def is_hypo_isomorphic(list1, list2):
    """Check whether two integer sequences are isomorphic, i.e. whether a
    one-to-one value mapping carries list1 onto list2 position by position.

    Based on http://www.geeksforgeeks.org/check-if-two-given-strings-are-isomorphic-to-each-other/

    Args:
        list1, list2: lists of non-negative integers.
    Returns:
        bool: True when the sequences are isomorphic.
    """
    if len(list1) != len(list2):
        return False
    if not list1:
        # Two empty sequences are trivially isomorphic; the original
        # crashed here with ValueError on max() of an empty list.
        return True
    max_ind = max(list1 + list2) + 1
    marked = [False] * max_ind  # list2 values already used as an image
    mapped = [-1] * max_ind     # image of each list1 value (-1 = unmapped)
    for a, b in zip(list1, list2):
        if mapped[a] == -1:
            if marked[b]:  # b already taken by another value -> not 1-to-1
                return False
            marked[b] = True
            mapped[a] = b
        elif mapped[a] != b:
            return False
    return True
def input_to_list(input_data, capitalize_input=False):
    """ Helper function for handling input list or str from the user.

    Args:
        input_data (list or str): input from the user to handle.
        capitalize_input (bool): whether to title-case all-lowercase
            words in the resulting items.

    Returns:
        list: the original list, or the string split on commas (empty
        pieces dropped).
    """
    if not input_data:
        items = []
    elif isinstance(input_data, list):
        items = input_data
    else:
        items = [piece for piece in input_data.split(',') if piece]
    if not capitalize_input:
        return items
    # Title-case only words that are entirely lowercase; words with any
    # uppercase (e.g. acronyms) are left untouched.
    return [
        " ".join(word.title() if word.islower() else word for word in item.split())
        for item in items
    ]
def create_search_criterion_by_subject(subject):
    """Return the search criterion matching the given subject.

    .. versionadded:: 0.4
    """
    return f'SUBJECT "{subject}"'
def timeFormatPretty(msg):
    """Normalise a time string like "20:03:46.0156250" to "20:03:46".

    Drops any fractional seconds and zero-pads each field.  NOTE: the
    previous docstring claimed 12-hour "08:03:46 PM" output, but the
    function always returned 24-hour time -- the AM/PM computation was
    dead code and has been removed; the docstring now matches behavior.
    """
    # Strip fractional seconds, if present.
    if "." in msg:
        msg = msg.split(".")[0]
    h, m, s = (int(part) for part in msg.split(":"))
    return "%02d:%02d:%02d" % (h, m, s)
def item_len(item):
    """Return the length of ``item``'s string representation."""
    text = str(item)
    return len(text)
def time_stamps_manipulation(time_stamp_text):
    """ Change the time stamps string into a standard format.

    Drops the trailing character, strips quotation marks and square
    brackets, then splits the remainder on commas.

    :param time_stamp_text: JSON string with quotations outside
    :returns:
        - time_stamps - list of time-stamp fields
    """
    trimmed = time_stamp_text[:-1]
    # Remove quotes and both brackets in a single translate() pass.
    cleaned = trimmed.translate(str.maketrans('', '', '"[]'))
    return cleaned.split(",")
def filter1(tally_dict, num_bison, num_cattle):
    """
    Filter a record based on its genotype tallies.

    A record passes when:
    1. More than 30% of cattle and more than 30% of bison are called.
    2. No homozygous-alternative cattle (could indicate common ancestry)
       and no heterozygous cattle.
    3. Exactly one heterozygous bison, no homozygous-reference bison,
       and at least one homozygous-alternative bison.

    @param tally_dict: Dictionary containing the records tally of genotypic frequencies of the samples.
    @param num_bison: total number of bison samples (call-rate denominator).
    @param num_cattle: total number of cattle samples (call-rate denominator).
    @return: True if the tally dict passes all checks, False otherwise.

    Rewritten from assert-based control flow: asserts are stripped under
    ``python -O`` (which made every record pass), and a zero sample
    count raised ZeroDivisionError instead of rejecting the record.
    """
    if num_cattle == 0 or num_bison == 0:
        # No samples cannot satisfy the 30% call-rate requirement.
        return False
    return (
        tally_dict['total_cows_called'] / num_cattle > 0.3
        and tally_dict['cow_hom_alt'] == 0
        and tally_dict['cow_het'] == 0
        and tally_dict['bison_het'] == 1
        and tally_dict['bison_hom_ref'] == 0
        and tally_dict['total_bison_called'] / num_bison > 0.3
        and tally_dict['bison_hom_alt'] != 0
    )
def seconds_to_milliseconds(time):
    """
    Returns: ``time`` converted from seconds to whole milliseconds
    (rounded to the nearest integer).
    """
    milliseconds = time * 1000
    return int(round(milliseconds))
def makeGeojson(itemType, coords, msg):
    """Create geojson dictionary.

    Args:
        itemType (str): One of ``Point`` or ``LineString``.
        coords (list): List of two coordinates (longitude, latitude).
        msg (dict): Source record; its ``unique_name`` field becomes the
            feature's ``id`` property.  (The docstring previously
            described a non-existent ``id`` parameter.)

    Returns:
        dict: Dictionary which is a 'geojson' slot of the specified type
        and coordinates.
    """
    # Build the whole FeatureCollection in one literal instead of
    # patching 'properties' in afterwards.
    return {
        'type': 'FeatureCollection',
        'features': [{
            'type': 'Feature',
            'geometry': {
                'type': itemType,
                'coordinates': coords,
            },
            'properties': {
                'id': msg['unique_name'],
            },
        }],
    }
def normalize_assignments(assignments):
    """Relabel cluster ids by order of first appearance so that all
    essentially equal clustering states map to the same array.  The
    return type is a tuple so it can be hashed as a dict key.
    """
    relabel = {}
    for cluster_id in assignments:
        # First-seen id gets the next fresh label (0, 1, 2, ...).
        relabel.setdefault(cluster_id, len(relabel))
    return tuple(relabel[cluster_id] for cluster_id in assignments)
def home_view(request):
    """ Return a list of all valid API calls by way of documentation.

    ``request`` is accepted to satisfy the web-framework view signature
    but is not used.  The returned dict maps human-readable descriptions
    to URL patterns; placeholders like {name}/{id} presumably mirror the
    route table elsewhere in the project -- confirm against the routes.
    """
    call_list = {"Valid API Call List":{
        "Retrieve User List": "/users",
        "Get my details": "/user",
        "Get my touches": "/user/touches",
        "Set my password": "/user/password",
        "Get my credit": "/user/credit",
        "servers": "/servers", # Return server list
        "Server details by name": "/servers/{name}", # Get server details or
        "Server details by ID": "/servers/by_id/{id}",
        "Start a server": "/servers/{name}/Starting",
        "Stop a server": "/servers/{name}/Stopping",
        "Restart a server": "/servers/{name}/Restarting",
        "De-Boost a server": "/servers/{name}/pre_deboosting",
        "server_Pre_Deboosted": "/servers/{name}/Pre_deboosted",
        "server_Deboost": "/servers/{name}/deboosting",
        "server_Started": "/servers/{name}/Started",
        "server_Stopped": "/servers/{name}/Stopped",
        "Boost a server": "/servers/{name}/preparing",
        "server_Prepared": "/servers/{name}/prepared",
        "server_owner": "/servers/{name}/owner",
        "server_touches": "/servers/{name}/touches",
        "CPU/RAM Specification": "/servers/{name}/specification",
        "All states, and count by state": "/states",
        "Servers is state": "/states/{name}",
        "Servers needing deboost": "/deboost_jobs",
        }
    }
    return call_list
def reconstruct_path_to_point(point, came_from_graph):
    """
    Create a path from start to destination.

    Walks the dictionary of preceding nodes (created by the A*
    algorithm) from ``point`` back toward the start, then reverses the
    collected nodes.  The starting point (which has no predecessor
    entry) is not included in the path.

    Uses append-then-reverse instead of ``insert(0, ...)``, which was
    O(n) per step and O(n^2) for the whole path.
    """
    path = []
    while point in came_from_graph:
        path.append(point)
        point = came_from_graph[point]
    path.reverse()
    return path
def get_preferred_taxa_name(taxa_id, megan_map, id_to_name):
    """
    Format an NCBI taxon id into a preferred display name.

    Checks the MEGAN mapping first, then the loaded NCBI taxonomy tree,
    and finally falls back to 'Unknown', always appending the id, e.g.
    'Unknown (12345)'.

    :param taxa_id: numeric taxa id to translate
    :param megan_map: preferred megan mapping hash
    :param id_to_name: local ncbi tree hash
    :return: "preferred name (id)"
    """
    key = str(taxa_id)
    if key in megan_map:
        name = megan_map[key]
    elif key in id_to_name:
        name = id_to_name[key]
    else:
        name = "Unknown"
    return name + " (" + key + ")"
def construct_session_manager_url(instance_id: str, region: str = "us-east-1") -> str:
    """Assemble the AWS console session manager url with the current instance id and region."""
    base = f"https://{region}.console.aws.amazon.com/systems-manager/session-manager"
    return f"{base}/{instance_id}?region={region}"
def decode_to_string(data):
    """
    Return the str() of the UTF-8 encoded elements of ``data`` so the
    result prints without the u'' prefix (Python 2 artefact).

    Args:
        data (list(str) or set(str))
    """
    encoded = [item.encode('UTF8') for item in data]
    return str(encoded)
def _process(json_data):
"""Return a 3d array (iteration x spreadsheet) of microplates' names."""
iterations = []
for iteration in json_data[u'iterations']:
spreadsheets = []
for spreadsheet in iteration[u'spreadsheets']:
spreadsheets.append(spreadsheet[u'microplates'].keys())
iterations.append(spreadsheets)
return iterations |
def _prepare_shape_for_expand_dims(shape, axes):
    """
    Creates the expanded new shape based on the shape and given axes

    Args:
        shape (tuple): the shape of the tensor
        axes Union(int, tuple(int), list(int)): the axes with dimensions expanded.

    Returns:
        new_shape(tuple): the shape with dimensions expanded.

    Raises:
        ValueError: if an axis is out of range for the expanded rank.
        TypeError: if axes is not an int, tuple or list.
    """
    new_shape = []
    shape_idx = 0
    new_shape_length = len(shape)
    # Convert to set
    if isinstance(axes, int):
        # Bounds are checked against the expanded rank (len(shape)+1).
        new_shape_length += 1
        if axes >= new_shape_length or axes < -new_shape_length:
            raise ValueError(
                f"axis {axes} is out of bounds for tensor of dimension {new_shape_length}")
        axes = {axes}
    elif isinstance(axes, (list, tuple)):
        new_shape_length += len(axes)
        for axis in axes:
            if axis >= new_shape_length or axis < -new_shape_length:
                raise ValueError(
                    f"axis {axis} is out of bounds for tensor of dimension {new_shape_length}")
        axes = set(axes)
    else:
        raise TypeError(
            f"only int, tuple and list are allowed for axes, but got {type(axes)}")
    for new_shape_idx in range(new_shape_length):
        # An axis may be given negative; test both its positive and
        # negative spelling against the expanded position.
        if new_shape_idx in axes or new_shape_idx - new_shape_length in axes:
            new_shape.append(1)
        else:
            # Non-expanded positions consume the original dims in order.
            new_shape.append(shape[shape_idx])
            shape_idx += 1
    return tuple(new_shape)
def slice_extend(sl, max_stop, min_start=0):
    """Failure-tolerant slice extension in a specified range.

    Given a slice, checks that sl.start is not less than min_start,
    and that sl.stop is not more than max_stop.  If either bound is
    violated, the slice is converted to an explicit index list, padded
    at the front with ``min_start`` and/or at the back with
    ``max_stop - 1`` so the total length is preserved; otherwise the
    slice is returned unchanged.  Assumes sl.start and sl.stop are not
    None and the step is 1 -- TODO confirm with callers.
    """
    # Amount clipped at the front, and the clamped first index.
    sl_pre, first = (min_start - sl.start if sl.start < min_start else 0,
                     max(min_start, sl.start))
    # Amount clipped at the back, and the clamped stop index.
    sl_post, last = (sl.stop - max_stop if sl.stop > max_stop else 0,
                     min(max_stop, sl.stop))
    if sl_pre != 0 or sl_post != 0:
        sl = ([min_start,]*sl_pre + list(range(first, last)) +
              [max_stop-1,]*sl_post)
    return sl
def is_word_guessed(word, past_guesses):
    """Returns whether the word is guessed or not.

    True when every letter of ``word`` appears in ``past_guesses``.
    For example, is_word_guessed('hello', ['h', 'e', 'a', 'o', 'l'])
    returns True while is_word_guessed('hello', ['e', 't', 'a'])
    returns False.

    Args:
        word: a string
        past_guesses: a list of strings
    Returns:
        bool: True if word was guessed else false
    """
    guessed = set(past_guesses)  # O(1) membership instead of O(n) per letter
    # all() short-circuits at the first missing letter; the original
    # always scanned the whole word.
    return all(letter in guessed for letter in word)
def parser_CA_system_Descriptor(data,i,length,end):
    """\
    parser_CA_system_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    This descriptor is not parsed at the moment; the raw payload (the
    bytes after the 2-byte descriptor header) is returned as-is:

    { "type": "CA_system", "contents" : unparsed_descriptor_contents }

    (Defined in ETSI EN 300 468 specification)
    """
    contents = data[i+2:end]
    return {"type": "CA_system", "contents": contents}
def skip_if(cursor, offset, date_key):
    """Return True if we want to skip a particular date.

    Useful to skip work that already has been done.  Only ``date_key``
    is consulted; ``cursor`` and ``offset`` are accepted to satisfy the
    caller's hook signature but are unused.
    """
    # Dates strictly before 2019-12-27 (yyyymmdd as an int) are skipped.
    return int(date_key) < 20191227
def winning_stones(consecutive, open_ends):
    """Check if a series of stones is advantageous."""
    if consecutive >= 5:
        return True
    if consecutive == 4 and open_ends >= 1:
        return True
    return consecutive == 3 and open_ends == 2
def hex2dec(string):
    """Convert a hexadecimal string to a decimal integer."""
    hex_base = 16
    return int(string, hex_base)
def lsst_num_exposures(bands='', coadd_years=10):
    """
    Number of LSST exposures per requested band, scaled by survey length.

    Args:
        bands (str): comma-separated band names among u, g, r, i, z, Y.
            An empty string yields an empty list (the original raised
            KeyError on the '' token produced by ``''.split(',')``).
        coadd_years (int): Number of years of the survey to utilize.

    Returns:
        list of int: one scaled exposure count per requested band.
    """
    full_survey = {'u': 140, 'g': 200, 'r': 460, 'i': 460, 'z': 400, 'Y': 400}
    # Counts are for the full 10-year survey; scale linearly by years.
    return [coadd_years * full_survey[b] // 10
            for b in bands.split(',') if b]
def toggle_popover_tab3(n, is_open):
    """
    Open/close pop-over callback for the how-to-use button.

    Toggles ``is_open`` when the button has click count ``n`` truthy,
    otherwise leaves the state unchanged.
    """
    return (not is_open) if n else is_open
def find_supports(combinations, supports, combination):
    """
    find supports of combinations

    Parameters
    ----------
    combinations : list
        combinations to find supports.
        for example :
            [("apple"), ("banana"), ("mango"),
             ("apple", "banana"), ...]
    supports : list
        support of combinations.
        for example :
            [0.43, 0.64, 0.35,
             0.2, ...]
    combination : list
        combination to find support from combinations.
        for example :
            ("mango")

    Returns
    -------
    support of combination (matched as an unordered set), or 0 when the
    combination is unknown.
    """
    target = set(combination)
    for candidate, support in zip(combinations, supports):
        if set(candidate) == target:
            return support
    return 0
def get_numeric_event_attribute_rep(event_attribute):
    """
    Get the feature name associated to a numeric event attribute

    Parameters
    ------------
    event_attribute
        Name of the event attribute

    Returns
    -------------
    feature_name
        Name of the feature ("event:" prefixed attribute name)
    """
    prefix = "event:"
    return prefix + event_attribute
def down_index(index):
    """Return the spin-down orbital index for a spatial orbital index.

    Spin-down orbitals occupy the odd slots of the interleaved ordering.

    Args:
        index (int): spatial orbital index
    Returns:
        int: index of the associated spin-down orbital
    """
    return index * 2 + 1
def _filter_out_duplicate_spotify_artists(spotify_artists):
"""
We should not try to add the same artist multiple times to the graph, so filter
out any duplicates.
"""
spotify_artist_dictionary = {
spotify_artist.id: spotify_artist for spotify_artist in spotify_artists
}
return list(spotify_artist_dictionary.values()) |
def distance(x1, y1, x2, y2):
    """Get the Euclidean distance between two points."""
    dx = x2 - x1
    dy = y2 - y1
    return (dx ** 2 + dy ** 2) ** 0.5
def get_loss_weights(config: dict):
    """
    Create the loss-weights dict from the config.

    Always includes 'main_out'; only in mode 's_av' are the 'a_out' and
    'v_out' entries added as well.  All four config keys ('mode',
    'main_out_weight', 'a_out_weight', 'v_out_weight') are read
    unconditionally, as before.
    """
    # Read config file
    mode = config['mode']
    a_weight = config['a_out_weight']
    v_weight = config['v_out_weight']
    weights = {'main_out': config['main_out_weight']}
    if mode == 's_av':
        weights['a_out'] = a_weight
        weights['v_out'] = v_weight
    return weights
def image_course_object_factory(image_id, course_id):
    """Cook up a fake imagecourse json object from given ids."""
    return {
        'image_id': image_id,
        'course_id': course_id,
    }
def subnlbybr(str):
    """Replace every newline with an HTML line break.

    >>> subnlbybr("\\n")
    '<br/>'
    """
    return str.replace("\n", "<br/>")
def combineBitVectors( trailer, meat, header ):
    """
    Combine the 3 bit vectors comprising a transmission.  Since the least
    significant bits are sent first, the header is appended last so its
    bits go out first from the least significant position.
    """
    combined = bytearray()
    for part in (trailer, meat, header):
        combined.extend(part)
    return combined
def chunkData(bfr):
    """
    Analyze the 64k buffer and find all 128-byte chunks that are in use.

    A chunk is "in use" when any of its cells is >= 0 (unchanged cells
    are negative).  Returns a list with one entry per chunk -- "1" when
    the chunk's data changed and must be uploaded, "0" otherwise (512
    entries for a full 64k buffer).
    """
    mapped = []
    for start in range(0, len(bfr), 128):
        chunk = bfr[start:start + 128]  # split buffer into 128-byte chunks
        # any() short-circuits on the first used cell; the original
        # scanned every cell of every chunk even after finding one.
        mapped.append(1 if any(cell >= 0 for cell in chunk) else 0)
    return mapped
def subtract(a, b):
    """Subtract ``b`` from ``a`` and return the difference."""
    return a - b
def _format_list(name, list1, datadict, **kwargs):
"""Concatenate and format list items.
This is used to prepare substitutions in user-supplied args to the
various test invocations (ie: the location of test_bin).
Args:
@param name: The name of the item in `datadict`.
@param list1: A list of items to prepend to the list item from datadict.
@param datadict: A dictionary of per-test parameters.
@param **kwargs: Values to pass to the format function.
Returns:
list[str]
"""
return [x.format(**kwargs) for x in list1 + datadict.pop(name, [])] |
def left_ontime(departure):
    """Return True if left ontime. False, otherwise.

    ``departure`` is indexed as (planned, actual); a missing/falsy
    actual time means the departure cannot be on time.
    """
    planned, actual = departure[0], departure[1]
    if actual:
        return actual == planned
    return False
def make_importance_result_dict(importance_vectors, importance_measures, meta):
    """
    Bundle importance results into a single dict.

    :param importance_vectors: per example list of [vectors | dictionary of vectors]
    :param importance_measures: per example list of [vector | dictionary of vectors]
    :param meta: dictionary of tags or other values
    :return: dictionary holding whichever importance entries were given
        (None entries are omitted) plus the mandatory 'meta' entry
    """
    result = {}
    if importance_vectors is not None:
        result['importance_vectors'] = importance_vectors
    if importance_measures is not None:
        result['importance_measures'] = importance_measures
    result['meta'] = meta
    return result
def reverseIntegerA(x):
    """
    Reverse the decimal digits of ``x``, returning 0 when the result
    overflows a signed 32-bit integer.

    :type x: int
    :rtype: int
    """
    int_max = pow(2, 31) - 1
    digits = str(x)
    if x >= 0:
        reversed_value = int(digits[::-1])
        return 0 if reversed_value > int_max else reversed_value
    # Negative: reverse everything after the sign, then negate.
    reversed_value = -int(digits[:0:-1])
    return 0 if reversed_value < -(int_max + 1) else reversed_value
def save_fill(C, J):
    """Fill question marks at the beginning of both strings with '0',
    up to one position before the first non-'?' character.

    Returns the (possibly updated) pair (C, J) as strings.

    Converts each string to a list once and joins once, instead of the
    original list()/join() round-trip on every single replacement.
    """
    # Length of the leading run where BOTH strings have '?'.
    first_digit = 0
    for c, j in zip(C, J):
        if c != '?' or j != '?':
            break
        first_digit += 1
    if first_digit <= 1:
        # Nothing to fill (range(first_digit - 1) would be empty).
        return C, J
    c_chars, j_chars = list(C), list(J)
    for i in range(first_digit - 1):
        if c_chars[i] == '?':
            c_chars[i] = "0"
        if j_chars[i] == '?':
            j_chars[i] = "0"
    return "".join(c_chars), "".join(j_chars)
def to_alternating_case(string):
    """Input a string, return the string with every letter's case swapped."""
    return string.swapcase()
def inv_sinc(arg):
    """
    Newton-Raphson method for calculating arcsinc(x), from Obit.

    Iterates from an initial guess of 0.001 until successive iterates
    differ by less than 1e-6, or 1000 iterations elapse; the last
    iterate is returned either way (possibly unconverged).
    """
    import numpy as np
    x1 = 0.001
    for i in range(0, 1000):
        x0 = x1
        a = x0 * np.pi
        # Newton-style update on f(x) = sin(pi x)/(pi x) - arg,
        # with the derivative expression as carried over from Obit.
        x1 = x0-((np.sin(a)/a)-arg) / ((a*np.cos(a) - np.pi*np.sin(a)) / (a**2))
        if (np.fabs(x1 - x0) < 1.0e-6): break
    return x1
def chose_examples(labels, label_set=None, number=1):
    """Choses n example of each label.

    Returns the indices (into ``labels``) of the first ``number``
    occurrences of every label in ``label_set`` (defaulting to the set
    of labels present).  Raises ValueError when a label occurs fewer
    than ``number`` times.
    """
    if label_set is None:
        label_set = set(labels)
    chosen = []
    for label in label_set:
        position = -1
        for _ in range(number):
            # Search resumes just past the previous hit.
            position = labels.index(label, position + 1)
            chosen.append(position)
    return chosen
def extract_one_summary(chapter_summary):
    """
    Extract the summary and the list of chapters present in "chapter_summary".

    Args:
        chapter_summary: pair of (chapter string, list of summary
            fragments).  The chapter string is assumed to look like
            "Chapter x" or "Chapter x - y" with integer x and y.

    Returns:
        Tuple whose first element is the list of chapter numbers covered
        and whose second element is the summary string with a newline
        inserted after every sentence.
    """
    chapters, summary_list = chapter_summary
    # One sentence per line.
    summary_string = ".\n".join(" ".join(summary_list).split("."))
    # Pull the integer tokens out of e.g. "Chapter 3 - 5".
    chapter_list = [int(token) for token in chapters.split(" ") if token.isdigit()]
    if len(chapter_list) > 1:
        # A range "x - y" expands to every chapter in [x, y].
        chapter_list = list(range(chapter_list[0], chapter_list[1] + 1))
    return chapter_list, summary_string
def _create_sql_values(ids, values):
"""Creates concatenated string of values so there's no need to call INSERT in loop"""
arr = []
arr.extend(['(' + str(ids[i]) + ',' + str(values[i]) + ')' for i in range(len(ids))])
return ','.join(arr) |
def update_ocean_floor(points, ocean_floor):
    """
    Increment the ocean-floor grid by 1 at each of the given points.

    :param points: iterable of (row, col)-indexable points
    :param ocean_floor: 2d grid (list of lists), mutated in place
    :return: the same ``ocean_floor`` grid
    """
    for point in points:
        row, col = point[0], point[1]
        ocean_floor[row][col] += 1
    return ocean_floor
def _func_dist(x, a, b, c):
"""
Function for finding the minimum distance.
"""
return x ** 2 + (a * x ** 2 + b * x + c) ** 2 |
def to_list(obj):
    """Convert a single object to a list.

    ``None`` becomes []; an existing list or tuple is returned unchanged
    (note: a tuple is passed through as-is, not converted to a list).
    """
    if isinstance(obj, (list, tuple)):
        return obj
    return [] if obj is None else [obj]
def sort_list_by_keylist(list_data, keylist, base):
    """
    Order ``list_data`` to follow ``keylist``.

    For each key in ``keylist`` (in order), every item whose
    ``item[base]`` equals that key is appended, keeping the items'
    original relative order.  Items whose key is absent from
    ``keylist`` are dropped.

    :param list_data: items to order (each indexable by ``base``)
    :param keylist: desired key order
    :param base: index/field of each item holding its key
    :return: the newly ordered list
    """
    return [item
            for wanted_key in keylist
            for item in list_data
            if item[base] == wanted_key]
def convert_distance_to_probability(distances, a, b):
    """ convert distance representation into probability,
    as a function of a, b params

    Evaluates 1 / (1 + a * d^(2b)); with array-like ``distances``
    (e.g. numpy arrays) the expression broadcasts elementwise.
    """
    return 1.0 / (1.0 + a * distances ** (2 * b))
def session_path(info):
    """Construct a session group path from a dict of values with
    ``rat``, ``day`` (zero-padded to two digits) and ``comment`` keys.
    """
    template = "/data/rat{rat:02d}/day{day:02d}/{comment}"
    return template.format(**info)
def native(obj):
    """
    Return the native counterpart of a backported (``future``-style) object.

    On Py3 this is a no-op: native(obj) -> obj.  On Py2, backported
    builtins types expose ``__native__()`` returning the corresponding
    native Py2 superclass instance, e.g.::

        >>> native(str(u'ABC'))     # Py2: unicode u'ABC'
        >>> native(bytes(b'ABC'))   # Py2: native bytes
        >>> native(int(10**20))     # Py2: long

    Objects without a ``__native__`` hook are returned unchanged.
    """
    return obj.__native__() if hasattr(obj, '__native__') else obj
def is_subdir(subdir_path, dir_path):
    """Return True when ``subdir_path`` equals ``dir_path`` or lies
    inside it.  (The previous docstring had the parameters reversed.)

    Both paths are normalised to a single trailing '/' before the
    prefix test so that "/a/bc" does not count as inside "/a/b".
    """
    return (subdir_path.rstrip('/') + '/').startswith(dir_path.rstrip('/') + '/')
def findAlphabeticallyLastWord(text):
    """
    Given a string |text|, return the word in |text| that comes last
    alphabetically (that is, the word that would appear last in a dictionary).
    A word is defined by a maximal sequence of characters without whitespaces.
    You may assume the input only consists of lowercase letters and whitespaces.
    """
    words = text.split()
    return max(words)
def _range_check(datarange):
"""
Utility to check if data range is outside of precision for 3 digit base 256
"""
maxrange = 256 ** 3
return datarange > maxrange |
def _strip_result(context_features):
"""Keep only the latest instance of each keyword.
Arguments
context_features (iterable): context features to check.
"""
stripped = []
processed = []
for feature in context_features:
keyword = feature['data'][0][1]
if keyword not in processed:
stripped.append(feature)
processed.append(keyword)
return stripped |
def _validate_arg(value, expected):
"""Returns whether or not ``value`` is the ``expected`` type."""
return isinstance(value, expected) |
def roman_to_int_converter(value):
    """Roman to integer converter."""
    numeral_values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    previous = 0
    for char in value:
        current = numeral_values[char]
        if previous and current > previous:
            # Subtractive pair (e.g. IV): undo the previous addition
            # and subtract it instead -- net adjustment of -2*previous.
            total += current - 2 * previous
        else:
            total += current
        previous = current
    return total
def _bbox(lat_min, lon_min, lat_max, lon_max):
"""Format bounding box as a string as expected by the Overpass API."""
return f'({lat_min},{lon_min},{lat_max},{lon_max})' |
def find_num_of_days_annual(ann_num_of_days, rp_num_of_days):
    """Use runperiod data to calculate the number of days for each
    annual period: the first runperiod's day count divided evenly
    (floor) across all annual periods."""
    days_per_period = rp_num_of_days[0] // len(ann_num_of_days)
    return [days_per_period] * len(ann_num_of_days)
def allequal(iterable):
    """Check if all elements inside a sequence are equal.

    An empty sequence is vacuously all-equal (the original raised
    IndexError on empty input).  Requires an indexable sequence, as the
    first element is used as the comparison reference.
    """
    if not iterable:
        return True
    first = iterable[0]
    # all() short-circuits on the first mismatch.
    return all(item == first for item in iterable)
def sort(_list):
    """
    Shell sort algorithm.

    Sorts ``_list`` in place (the argument is mutated) and also returns
    it for convenience.

    :param _list: list of integers to sort
    :return: the same list, sorted ascending
    """
    gap = len(_list) // 2
    while gap > 0:
        # Gapped insertion sort: after this pass, elements `gap` apart
        # are in order relative to each other.
        for i in range(gap, len(_list)):
            current_item = _list[i]
            j = i
            while j >= gap and _list[j - gap] > current_item:
                _list[j] = _list[j - gap]
                j -= gap
            _list[j] = current_item
        # Halve the gap; the final gap of 1 is a plain insertion sort.
        gap //= 2
    return _list
def _setheader(report_type, endpoint, page_url, cookie):
    """
    Initialize the request headers depending on the report type.

    ``report_type`` is currently unused.  ``cookie`` must be a mapping
    with "cookie_name" and "cookie_value" entries.  Returns the headers
    as a list of "Name: value" strings (pycurl/raw-HTTP style).
    """
    endpoint_uri = 'https://' + endpoint + '/'
    headers = [
        'Host: ' + endpoint,
        'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0',
        'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language: en-US,en;q=0.5',
        'Referer: ' + endpoint_uri + page_url ,
        'Content-Type: application/x-www-form-urlencoded',
        'Cookie: ' + cookie["cookie_name"] + '=' + cookie["cookie_value"],
        'Connection: keep-alive',
        'Upgrade-Insecure-Requests: 1',
        'DNT: 1'
    ]
    return headers
def memory_setting(label):
    """
    Parse user-supplied memory setting.

    Converts strings into floats, representing a number of bytes. Supports the
    following notations.

    - raw integers: 1, 1000, 1000000000
    - scientific notation: 1, 1e3, 1e9
    - "common" notation: 1, 1K, 1G

    Suffixes supported: K/k, M/m, G/g, T/t. Do not include a trailing B/b.

    Raises ValueError when the label cannot be parsed.
    """
    suffixes = {
        'K': 1000.0,
        'M': 1000.0 ** 2,
        'G': 1000.0 ** 3,
        'T': 1000.0 ** 4,
    }
    try:
        # Plain numbers (including scientific notation) need no suffix
        # handling at all.
        return float(label)
    except ValueError:
        pass
    prefix, suffix = label[:-1], label[-1:].upper()
    if suffix not in suffixes:  # idiomatic: no .keys() needed
        raise ValueError('cannot parse memory setting "{}"'.format(label))
    try:
        multiplier = float(prefix)
    except ValueError:
        # `from None` suppresses the noisy chained float() traceback;
        # the message already says everything the caller needs.
        raise ValueError('cannot parse memory setting "{}"'.format(label)) from None
    return multiplier * suffixes[suffix]
def encode_count_name(count_name):
    """Encode a name to printable ASCII characters so it can be safely
    used as an attribute name for the datastore.

    Printable ASCII (codes 33-126) is kept as-is, a backslash is doubled
    to "\\\\", and everything else becomes a "\\uXXXX" escape.
    """
    pieces = []
    for ch in count_name:
        code = ord(ch)
        if code == 92:  # backslash: the escape character itself
            pieces.append('\\\\')
        elif 33 <= code <= 126:
            pieces.append(ch)
        else:
            pieces.append('\\u%04x' % code)
    return ''.join(pieces)
def flatten_list(list_):
    """
    Flatten list.

    Turn a list of lists into a list of all elements:
    [[1], [2, 3]] -> [1, 2, 3]
    """
    flat = []
    for sublist in list_:
        flat.extend(sublist)
    return flat
def insertion_sort(source_array: list) -> list:
    """
    Implementation of the polynomial O(n^2) Insertion Sort algorithm.

    Arguments:
        source_array - array of integers to be sorted
    Returns:
        A NEW sorted list; ``source_array`` is left unmodified.

    Bug fix: the original did ``array = source_array``, which only
    aliases the argument, so despite its "create copy" comment it sorted
    the caller's list in place.
    """
    # Real copy, as the original comment intended.
    array = list(source_array)
    # Invariant: everything to the left of index i is sorted.
    for i in range(1, len(array)):
        key = array[i]
        # Insert the current key into the sorted prefix by shifting
        # larger elements one slot to the right.
        j = i - 1
        while j >= 0 and array[j] > key:
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = key
    return array
def parse_sheets_for_get_response(sheets: list, include_grid_data: bool) -> list:
    """
    Args:
        sheets (list): this is the sheets list from the Google API response
        include_grid_data (bool): will determine in what manner to parse the response

    Returns:
        list : The sheets after the relevant data was extracted.  Each
        output sheet carries title/sheetId/index/gridProperties plus a
        'rowData' list of {'values': [...]} rows of formatted cell
        values ('' for empty cells); rowData is empty when
        include_grid_data is False.

    NOTE(review): the original comment claimed this is called only with
    include_grid_data = true, yet the False branch is handled -- confirm
    with callers.  Also assumes sheet['data'] exists and is non-empty
    when grid data is requested ({}[0] would raise KeyError otherwise).
    """
    sheet_lst = []
    for sheet in sheets:
        output_sheet = {}
        properties = sheet.get('properties', {})
        output_sheet['title'] = properties.get('title')
        output_sheet['sheetId'] = properties.get('sheetId')
        output_sheet['index'] = properties.get('index')
        output_sheet['gridProperties'] = properties.get('gridProperties')
        row_data: list = []
        if not include_grid_data:
            # Without grid data, only sheet metadata is returned.
            output_sheet['rowData'] = []
            sheet_lst.append(output_sheet)
            continue
        response_rows_data = sheet.get('data', {})[0].get('rowData', None)
        if not response_rows_data:
            # Empty sheet: emit a single empty row.
            row_data.append({'values': []})
        else:
            for response_values in response_rows_data:
                values = []
                if not response_values:
                    # Empty row entry in the API response.
                    row_data.append({'values': []})
                else:
                    for response_cell_data in response_values.get('values'):
                        if not response_cell_data:
                            # Empty cell -> placeholder empty string.
                            values.append('')
                        else:
                            values.append(response_cell_data.get('formattedValue'))
                    row_data.append({'values': values})
        output_sheet['rowData'] = row_data
        sheet_lst.append(output_sheet)
    return sheet_lst
def Pixel2pc(in_pixel, in_size, out_range):
    """Convert points in feature_map coordinate system to lidar coordinate system.

    Each pixel covers ``2*out_range/in_size`` units; pixel centres
    (index + 0.5) are mapped so pixel 0 sits nearest ``out_range``.
    """
    cell_size = 2.0 * out_range / in_size
    return out_range - (in_pixel + 0.5) * cell_size
def simple_transfer(type, file_path, out_path=None):
    """
    Re-encode a file from the given encoding to UTF-8.

    Args:
        type (str): File's current encoding (codec name).
        file_path (str): Path to the file.
        out_path (str, optional): Path to the output file. Defaults to
            None, meaning ``file_path`` is overwritten.

    Returns:
        [bool]: whether the encoding was transferred successfully.
    """
    with open(file_path, 'rb') as f:
        data = f.read()
    try:
        data = data.decode(type).encode('utf-8')
    except (UnicodeDecodeError, LookupError):
        # Narrowed from a bare except: bad bytes for the codec, or an
        # unknown codec name, mean the transfer failed.  Unrelated
        # errors (e.g. MemoryError) now propagate instead of being
        # silently swallowed.
        return False
    with open(out_path if out_path else file_path, 'wb') as out_f:
        out_f.write(data)
    return True
def _update_together_save_hook(instance, *args, **kwargs):
    """
    Sets ``update_fields`` on :meth:`~django.db.models.Model.save` to include \
    any fields that have been marked as needing to be updated together with \
    fields already in ``update_fields``.

    Only acts when the caller passed ``update_fields``; otherwise the
    kwargs are returned untouched.

    :return: (continue_saving, args, kwargs) -- the first element True
        means the save should proceed
    :rtype: :class:`tuple`
    """
    if 'update_fields' in kwargs:
        # A set de-duplicates fields pulled in from multiple groups.
        new_update_fields = set(kwargs['update_fields'])
        for field in kwargs['update_fields']:
            # _meta.update_together maps each field to the fields that
            # must be saved alongside it.
            new_update_fields.update(instance._meta.update_together.get(field, []))
        kwargs['update_fields'] = list(new_update_fields)
    return(True, args, kwargs)
def count_valid( data, part2=False ):
    """
    For Part 1:
    -----------
    The valid passports are those that contain the following required fields:
    byr, iyr, eyr, hgt, hcl, ecl, pid, cid. The field 'cid' is optional.
    For Part 2:
    -----------
    Along with the rules for the presence of required fields from Part 1,
    additional rules are now considered for the values of those fields.
    These rules are defined in the `valid_values` dictionary below.
    This function returns the number of valid passports defined by the
    corresponding rules.
    """
    required_fields = {'byr','iyr','eyr','hgt','hcl','ecl','pid','cid'}
    allowed_missing = {'cid'}
    # Per-field Part-2 rules:
    #   byr/iyr/eyr -> [digit count, min year, max year]
    #   hcl         -> ['#', hex-digit count, allowed hex characters]
    #   ecl         -> set of allowed eye colors
    # NOTE(review): the 'hgt' and 'pid' entries are never read below; the
    # height and passport-id checks use hard-coded literals that duplicate
    # these values.
    valid_values = { 'byr':[4, 1920, 2002],
                     'iyr':[4, 2010, 2020],
                     'eyr':[4, 2020, 2030],
                     'hgt':[['cm', 150, 193], ['in', 59, 76]],
                     'hcl':['#',6, {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}],
                     'ecl':{'amb','blu', 'brn','gry','grn','hzl','oth'},
                     'pid': 9
                   }
    count = 0
    for num, entry in enumerate(data):  # num is unused
        if len(entry) > 0:
            entry_fields = set( entry.keys() )
            # flag1: all required fields (including 'cid') are present.
            flag1 = required_fields & entry_fields == required_fields
            # flag2: the only missing required field is the optional 'cid'.
            flag2 = required_fields - entry_fields == allowed_missing
            flag3 = True
            if part2:
                value_flags = dict()
                # NOTE(review): the checks below index entry[...] directly,
                # so they assume every required key exists (possibly mapped
                # to None) -- confirm against the parser that builds `data`.
                # A key whose value is None adds no entry to value_flags and
                # is only rejected via the presence checks flag1/flag2.
                # Check years
                for year in ['byr','iyr','eyr']:
                    if entry[year] is not None:
                        value_flags[year] = ( len( entry[year] ) == valid_values[year][0] and
                                              valid_values[year][1] <= int(entry[year]) <= valid_values[year][2]
                                            )
                # Check height: must end in 'cm' or 'in' and the number must
                # fall in the per-unit range (150-193 cm / 59-76 in).
                if entry['hgt'] is not None:
                    hgt_flag = entry['hgt'][-2:] in { 'cm', 'in' }
                    if hgt_flag and entry['hgt'][-2:] == 'cm':
                        hgt_flag = hgt_flag and 150 <= int(entry['hgt'][:-2]) <= 193
                    elif hgt_flag and entry['hgt'][-2:] == 'in':
                        hgt_flag = hgt_flag and 59 <= int(entry['hgt'][:-2]) <= 76
                    value_flags['hgt'] = hgt_flag
                # Check hair color: '#' followed by exactly six hex digits.
                if entry['hcl'] is not None:
                    value_flags['hcl'] = ( entry['hcl'][0] == '#' and
                                           len( entry['hcl'][1:] ) == 6 and
                                           set( entry['hcl'][1:] ).issubset( valid_values['hcl'][2] )
                                         )
                # Check eye color against the closed set of allowed values.
                if entry['ecl'] is not None:
                    value_flags['ecl'] = entry['ecl'] in valid_values['ecl']
                # Check passport id: exactly nine characters long
                # (does not verify that they are digits).
                if entry['pid'] is not None:
                    value_flags['pid'] = len( entry['pid'] ) == 9
                # AND over value_flags
                flag3 = False not in value_flags.values()
            # Count when the required fields are present (with or without
            # 'cid') and, for Part 2, every collected value check passed.
            if (flag1 or flag2) and flag3:
                count += 1
    return count
def identify_necessary_covariates(dependents, definitions):
    """Identify covariates necessary to compute `dependents`.

    This function can be used if only a specific subset of covariates is
    necessary and not all covariates.

    A covariate is kept when it has a definition with a non-empty
    ``"depends_on"`` set; plain inputs (no definition or no dependencies)
    are dropped from the result. Unlike the previous implementation, a
    name reachable through two paths of different lengths no longer
    triggers an order-dependent ``KeyError`` from ``set.remove``, and
    cyclic definitions terminate instead of looping forever.

    See also
    --------
    respy.likelihood._compute_x_beta_for_type_probability
    """
    roots = {dependents} if isinstance(dependents, str) else set(dependents)
    needed = set()
    frontier = set(roots)
    seen = set()
    while frontier:
        seen |= frontier
        next_frontier = set()
        for name in frontier:
            spec = definitions.get(name)
            if spec is not None and spec["depends_on"]:
                needed.add(name)
                next_frontier |= spec["depends_on"]
        # Never revisit a name; this both prevents double-removal and
        # guarantees termination on cyclic definitions.
        frontier = next_frontier - seen
    covariates = {name: definitions[name] for name in needed}
    return covariates
def line2dict(st):
    """Convert a line of comma-separated key=value pairs into a dict.

    Values that parse as integers are stored as ints; everything else is
    kept as the original string.

    :param st: input line, e.g. ``"a=1,b=foo"``
    :returns: a dictionary
    :rtype:
    """
    result = {}
    for pair in st.split(','):
        key, val = pair.split('=')
        try:
            result[key] = int(val)
        except ValueError:
            result[key] = val
    return result
def task_filter(f, iterator):
    """Return the items of `iterator` for which `f` is truthy.

    :param f: predicate called once per item
    :param iterator: iterable of items to filter
    :return: list of items where ``f(item)`` is truthy
    """
    # A comprehension replaces the manual append loop (same behavior).
    return [item for item in iterator if f(item)]
def get_pass_number(minutes, orbits_per_day):
    """
    Gets appropriately formatted pass number string.

    Parameters
    ----------
    minutes : float
        Minutes elapsed in the day since midnight UTC.
    orbits_per_day : float
        The number of orbits per day, around 15 for vehicles in low earth orbit.

    Returns
    -------
    str
        Zero-padded two-digit count of orbits completed so far today.
    """
    orbit_count = int(round(minutes * orbits_per_day / 1440.))
    return '%02d' % orbit_count
def isnone(x, default):
    """Return x if it's not None, or default value instead."""
    if x is None:
        return default
    return x
def call_with_args(callable, args):
    """Return an example of a call to callable with all its standard arguments.
    >>> call_with_args('fun', ['x', 'y'])
    'fun(x, y)'
    >>> call_with_args('fun', [('a', 'b'), 'c'])
    'fun((a, b), c)'
    >>> call_with_args('fun', ['a', ('b', ('c', 'd'))])
    'fun(a, (b, (c, d)))'
    """
    def render(arg):
        # Nested sequences become parenthesized, comma-joined groups.
        if isinstance(arg, (list, tuple)):
            inner = ', '.join(render(item) for item in arg)
            return '(' + inner + ')'
        return arg
    return '{}{}'.format(callable, render(args))
def rotate(input_s, bytes_):
    """
    Rotate a string left by a number of bytes.

    Args:
        input_s (bytes): the input string
        bytes_ (int): the number of bytes to rotate by (may be negative
            or larger than the length; reduced modulo ``len(input_s)``)

    Returns:
        (bytes) input_s rotated by bytes_ bytes. An empty input is
        returned unchanged (the original raised ZeroDivisionError).
    """
    if not input_s:
        # Guard: the modulo below would divide by zero on empty input.
        return bytes(input_s)
    shift = bytes_ % len(input_s)
    # Slicing does the same index arithmetic as a per-byte loop,
    # but in one C-level pass.
    return bytes(input_s[shift:] + input_s[:shift])
def not_(predicate: dict):
    """
    Inverse predicate
    :param predicate: Predicate to invert (left unmodified; a shallow
        copy with ``'inverse': True`` is returned)
    :return: dict
    """
    inverted = predicate.copy()
    inverted['inverse'] = True
    return inverted
def map_platforms(platforms):
    """Translate Grinder platform names to GitHub-hosted runner labels.

    Unknown names are left untouched. The 'platforms' list is modified
    in place and also returned.
    """
    runner_for = {
        'x86-64_windows': 'windows-latest',
        'x86-64_mac': 'macos-latest',
        'x86-64_linux': 'ubuntu-latest'
    }
    for index, name in enumerate(platforms):
        runner = runner_for.get(name)
        if runner is not None:
            platforms[index] = runner
    return platforms
def retr_radiroch(radistar, densstar, denscomp):
    """
    Return the Roche limit

    Arguments
        radistar: radius of the primary star
        densstar: density of the primary star
        denscomp: density of the companion
    """
    # r_Roche = R_star * (2 * rho_star / rho_companion)^(1/3)
    scale = (2. * densstar / denscomp) ** (1. / 3.)
    return radistar * scale
def status_string(value, min):
    """
    Checks to see if value is one of the ShakeMap status string of
    'automatic', 'released', or 'reviewed. Raises a ValidateError
    exception on failure.
    Args:
        value (str): A status string.
    Returns:
        str: The input string. 'automatic' is returned if value is empty.
    """
    allowed = ('automatic', 'released', 'reviewed')
    if not value:
        return 'automatic'
    if value in allowed:
        return value
    raise ValidateError(value)
def _process_keyword(obj, target, source, keyargs, default=None):
""" Set obj.target from:
- keyargs[source]
- default
- obj.source
in that order."""
arg = keyargs.get(source)
if arg is not None:
setattr(obj, target, arg)
elif default is not None:
setattr(obj, target, default)
elif hasattr(obj, source):
setattr(obj, target, getattr(obj, source))
return arg |
def create_category_dict(keywords_list):
    """
    Build a dictionary mapping each topic in ``keywords_list`` to a fresh
    empty list, ready to collect all reviews belonging to that category.
    """
    # One distinct list per topic (dict.fromkeys would share a single list).
    return {topic: [] for topic in keywords_list}
def _manhattan(from_x, from_y, to_x, to_y):
"""Calculates the manhattan distance between 2 points in 2D space.
Parameters
----------
from_x : int or float
The x coordinate of the 1st point.
from_y : int or float
The y coordinate of the 1st point.
to_x : int or float
The x coordinate of the 2nd point.
to_y : int or float
The y coordinate of the 2nd point.
Returns
-------
int
The manhattan distance between the 2 points. Rounded to the
nearest int.
"""
return int(round(abs(to_x - from_x) + abs(to_y - from_y))) |
def chebyshev_chix_asymptotic(n):
    """
    Returns the asymptotic value of chebyshev chi(x) function

    Parameters
    ----------
    n : int
        denotes positive integer
    return : int
        returns the asymptotic estimate of chebyshev chi(x) function for n
    """
    # Reject non-integral values and anything below 1.
    is_integral = n == int(n)
    if not is_integral or n < 1:
        raise ValueError(
            "n must be positive integer"
        )
    return int(n)
def fbool(value):
    """Boolean value from string or number"""
    if not isinstance(value, str):
        # Numbers: truthiness of the float value.
        return bool(float(value))
    lowered = value.lower()
    if lowered == "false":
        return False
    if lowered == "true":
        return True
    if not lowered:
        raise ValueError("Got empty string!")
    # Any other non-empty string must parse as a number.
    return bool(float(lowered))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.