content stringlengths 42 6.51k |
|---|
def hybrid_substation(input_dict):
    """
    Calculate the substation cost in USD for a hybrid plant.

    Parameters
    ----------
    input_dict : dict
        Must contain 'hybrid_substation_rating_MW' (MW) and, for
        ratings above 15 MW, 'interconnect_voltage_kV' (kV).

    Returns
    -------
    float
        Substation cost in USD (flat 1,000,000 for 10-15 MW and
        500,000 below 10 MW).
    """
    rating_mw = input_dict['hybrid_substation_rating_MW']
    if rating_mw > 15:
        # Cost curve: linear in (voltage + rating) plus a sub-linear
        # rating term and a fixed base cost.
        cost = (11652 * (input_dict['interconnect_voltage_kV'] + rating_mw)
                + 11795 * (rating_mw ** 0.3549) + 1526800)
    elif rating_mw > 10:
        cost = 1000000
    else:
        cost = 500000
    return cost
def list2str(l):
    """Convert a list of numbers to a space-separated matrix string.

    Each element is rendered in scientific notation with six decimal
    places (e.g. ``1.000000e+00``).

    Parameters
    ----------
    l : sequence of float
        Values to format.

    Returns
    -------
    str
        Space-separated formatted values ("" for an empty list).
    """
    # str.join replaces the original append-in-a-loop concatenation,
    # which was quadratic and needed manual separator bookkeeping.
    return " ".join("%.6e" % v for v in l)
def text_of_segments(segments):
    """Concatenate a sequence of text segments into one string.

    >>> segments = ['Hi there! ', 'My name is Peter.']
    >>> text_of_segments(segments)
    'Hi there! My name is Peter.'
    """
    joined_text = str.join('', segments)
    return joined_text
def _to_j2kt_name(name):
"""Convert a label name used in j2cl to be used in j2kt"""
if name.endswith("-j2cl"):
name = name[:-5]
return "%s-j2kt" % name |
def start_d(model, lval: str, rval: str):
    """
    Compute a starting value for a data-level parameter.

    Off-diagonal entries (lval != rval) start at 0.0; diagonal entries
    start at half the model's effect loading for *lval* (falling back
    to 0.05 when the model has no usable ``effects_loadings`` mapping).

    Parameters
    ----------
    model : Model
        Model instance.
    lval : str
        L-value name.
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    if lval != rval:
        return 0.0
    try:
        return model.effects_loadings.get(lval, 0.1) / 2
    except (AttributeError, TypeError):
        # Model lacks effects_loadings (or it is not a mapping).
        return 0.05
def _remove_duplicates_and_sort(my_list):
"""Return sorted array of unique values."""
# Casting to set removes duplicates
# Sorting set also casts it back to a list
return sorted(set(my_list)) |
def get_schedule_weekdays(schedule):
    """Return a list of weekdays (1-7) the specified schedule is active.

    Parameters
    ----------
    schedule : dict
        Schedule mapping; an optional "weekday" key restricts the
        schedule to that single day.

    Returns
    -------
    list
        ``[schedule["weekday"]]`` when a weekday is set, otherwise all
        seven weekdays ``[1, ..., 7]``.
    """
    weekday = schedule.get("weekday")
    # Return a real list in both branches — the original returned a
    # range object in the default case despite documenting a list.
    return [weekday] if weekday is not None else list(range(1, 8))
def format_transfer(b: int) -> str:
    """Render a byte count in human-readable form with K/M/G/T suffixes.

    Values below 1 KiB are returned unchanged as a plain string.
    Raises ValueError for negative input.
    """
    if b < 0:
        raise ValueError("Must be bigger than 0")
    for unit, threshold in (("T", 1 << 40), ("G", 1 << 30),
                            ("M", 1 << 20), ("K", 1 << 10)):
        if b >= threshold:
            return f"{b / threshold:.1f} {unit}"
    return str(b)
def unbox(numer, denom, multiplier):
    """Try to collapse a fractional unit into a simpler representation.

    Returns the bare multiplier when both numerator and denominator are
    empty, the single numerator unit when it stands alone with a unit
    multiplier, and None when no simplification is possible.
    """
    if denom:
        return None
    if not numer:
        return multiplier
    if len(numer) == 1 and multiplier == 1:
        return numer[0]
    return None
def _hexify(i, digits=4):
""" convert an integer into a hex value of a given number of digits"""
format_string = "0%dx" % digits
return format(i, format_string).upper() |
def merge_sorted_arrays2(array1, array2):
    """Merge two sorted arrays into one sorted array.

    Walks both inputs with independent cursors, always taking the
    smaller head (ties favour array1); once either input is exhausted,
    the remainder of the other is appended wholesale.
    """
    merged = []
    i = j = 0
    total = len(array1) + len(array2)
    while len(merged) < total:
        if i >= len(array1):
            merged.extend(array2[j:])
        elif j >= len(array2):
            merged.extend(array1[i:])
        elif array1[i] <= array2[j]:
            merged.append(array1[i])
            i += 1
        else:
            merged.append(array2[j])
            j += 1
    return merged
def sort_012(input_list):
    """
    Sort a list containing only 0s, 1s and 2s in a single traversal
    (Dutch national flag partition): zeros are swapped toward the
    front, twos toward the back, and the list is sorted in place.

    Args:
        input_list(list): List to be sorted
    Returns:
        The same list, sorted ([] for falsy input).
    """
    if not input_list:
        return []
    zero_boundary = 0
    two_boundary = len(input_list) - 1
    cursor = 0
    while cursor <= two_boundary:
        value = input_list[cursor]
        if value == 0:
            input_list[cursor] = input_list[zero_boundary]
            input_list[zero_boundary] = 0
            zero_boundary += 1
            cursor += 1
        elif value == 2:
            # The element swapped in from the back is re-examined on the
            # next iteration, so the cursor does not advance here.
            input_list[cursor] = input_list[two_boundary]
            input_list[two_boundary] = 2
            two_boundary -= 1
        elif value == 1:
            cursor += 1
    return input_list
def validate_domain_name(value):
    """Return an error fragment when *value* exceeds 256 characters, else ''."""
    too_long = len(value) > 256
    return "have length less than or equal to 256" if too_long else ""
def get_reduction_cell(cell_name):
    """Return reduction cell spec.

    Looks up hard-coded NAS architecture tables for the named cell.

    Parameters
    ----------
    cell_name : str
        One of 'evol_net_g'/'amoeba_net_a', 'evol_net_h'/'amoeba_net_b',
        'evol_net_a'/'amoeba_net_c' or 'evol_net_x'/'amoeba_net_d'
        (each pair is an alias for the same spec).

    Returns
    -------
    tuple
        (operations, hiddenstate_indices, used_hiddenstates):
        operations is a list of op-name strings; hiddenstate_indices
        gives the hidden-state input index per op; used_hiddenstates
        flags which hidden states are consumed.

    Raises
    ------
    ValueError
        For an unsupported cell name.
    """
    operations = []
    hiddenstate_indices = []
    used_hiddenstates = []
    if cell_name == 'evol_net_g' or cell_name == 'amoeba_net_a':
        operations = ['separable_3x3_2', 'avg_pool_3x3', 'max_pool_3x3',
                      'separable_7x7_2', 'max_pool_3x3', 'max_pool_3x3',
                      'separable_3x3_2', '1x7_7x1', 'avg_pool_3x3',
                      'separable_7x7_2']
        hiddenstate_indices = [1, 0, 0, 2, 1, 0, 4, 0, 1, 0]
        used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]
    elif cell_name == 'evol_net_h' or cell_name == 'amoeba_net_b':
        operations = ['max_pool_2x2', 'max_pool_3x3', 'none', '3x3',
                      'dil_2_separable_5x5_2', 'max_pool_3x3', 'none',
                      'separable_3x3_2', 'avg_pool_3x3', '1x1']
        hiddenstate_indices = [0, 0, 2, 1, 2, 2, 3, 1, 4, 3]
        used_hiddenstates = [1, 1, 1, 1, 1, 0, 0]
    elif cell_name == 'evol_net_a' or cell_name == 'amoeba_net_c':
        operations = ['max_pool_3x3', 'max_pool_3x3', 'separable_7x7_2',
                      'separable_3x3_2', 'separable_7x7_2', 'max_pool_3x3',
                      'separable_5x5_2', 'separable_5x5_2', 'max_pool_3x3',
                      'separable_3x3_2']
        hiddenstate_indices = [0, 0, 2, 0, 0, 1, 4, 4, 1, 1]
        used_hiddenstates = [0, 1, 0, 0, 0, 0, 0]
    elif cell_name == 'evol_net_x' or cell_name == 'amoeba_net_d':
        operations = ['max_pool_2x2', 'max_pool_3x3', 'none', '3x3', '1x7_7x1',
                      'max_pool_3x3', 'none', 'max_pool_2x2', 'avg_pool_3x3',
                      '1x1']
        hiddenstate_indices = [0, 0, 2, 1, 2, 2, 3, 1, 2, 3]
        used_hiddenstates = [1, 1, 1, 1, 0, 0, 0]
    else:
        raise ValueError('Unsupported cell name: %s.' % cell_name)
    return operations, hiddenstate_indices, used_hiddenstates
def sum_digits(n):
    """Sum all the digits of n.
    >>> sum_digits(10) # 1 + 0 = 1
    1
    >>> sum_digits(4224) # 4 + 2 + 2 + 4 = 12
    12
    >>> sum_digits(1234567890)
    45
    """
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
def video_content_to_dict(vid_info_list):
    """Build a title-keyed dictionary from a list of YouTube metadata dicts.

    Falsy entries (e.g. None) are skipped; each remaining entry is
    indexed by its title, retaining its id and duration. Duplicate
    titles keep the last occurrence.
    """
    return {
        video["title"]: {"id": video["id"], "duration": video["duration"]}
        for video in vid_info_list
        if video
    }
def get_conic_mat_by_five_point(x1, y1, x2, y2, x3, y3, x4, y4, x5, y5):
    """
    Build the 5x6 design matrix for fitting the conic
    a11 * x^2 + 2a12 * xy + a22 * y^2 + 2a1 * x + 2a2 * y + a0 = 0
    through five points.
    """
    xs = [x1, x2, x3, x4, x5]
    ys = [y1, y2, y3, y4, y5]
    matrix = []
    for x, y in zip(xs, ys):
        matrix.append([x ** 2, 2 * x * y, y ** 2, 2 * x, 2 * y, 1])
    return matrix
def get_item(path_id):
    """Build a DynamoDB Key argument for the given path id (PK == SK)."""
    key = {"PK": path_id, "SK": path_id}
    return {"Key": key}
def handle_url(a_url):
    """
    Split a string containing one or more concatenated URLs and return
    a list of cleaned-up URLs.

    Each fragment is re-prefixed with 'http' and has escaping
    backslashes removed.

    :param a_url: string of one or more URLs
    :return: list of unescaped URLs
    """
    scheme = 'http'
    fragments = a_url.split(scheme)
    return [scheme + fragment.replace("\\", "")
            for fragment in fragments if fragment != ""]
def tuplize(nested):
    """Recursively converts iterables into tuples.

    Strings are returned unchanged, and anything non-iterable is
    returned as-is.

    Args:
        nested: A nested structure of items and iterables.
    Returns:
        A nested structure of items and tuples.
    """
    if isinstance(nested, str):
        return nested
    try:
        children = iter(nested)
    except TypeError:
        # Not iterable — a leaf item.
        return nested
    return tuple(tuplize(child) for child in children)
def checkPrefix(mainList, inputGTParams):
    """Compare the input GT against the internal list to find one with
    the same prefix (the text before the first underscore).

    Parameters
    ----------
    mainList : list of str
        Internal list of GT names.
    inputGTParams : str
        GT name to look up; must contain an underscore.

    Returns
    -------
    int
        Index of the first match in mainList, or -1 if no match.

    Raises
    ------
    ValueError
        If inputGTParams contains no underscore. (More specific than
        the bare Exception the original raised; still caught by any
        existing ``except Exception`` handler.)
    """
    if "_" not in inputGTParams:
        raise ValueError("Invalid GT name. It does not contain an _, it cannot be used for replacements.")
    prefix = inputGTParams.split("_")[0]
    for index, name in enumerate(mainList):
        if name.split("_")[0] == prefix:
            return index
    return -1
def _sim_colour(r1, r2):
"""
calculate the sum of histogram intersection of colour
"""
return sum([min(a, b) for a, b in zip(r1["hist_c"], r2["hist_c"])]) |
def combine(list_of_values, sep=','):
    """Split each value on *sep* and flatten the pieces into one list.

    Returns None when the combined result is exactly the single
    placeholder ['-'].
    """
    flattened = []
    for values in list(list_of_values):
        flattened.extend(str(values).split(sep))
    if flattened == ['-']:
        return None
    return flattened
def next_event_name(trace: list, prefix_length: int):
    """Return the event name at *prefix_length*, or 0 when out of range."""
    if prefix_length >= len(trace):
        return 0
    return trace[prefix_length]['concept:name']
def get_po_line_created_date(po_line_record):
    """Return the digits of the PO Line created date (with the century
    dropped), or a fallback note when the record has no created date."""
    created = po_line_record.get("created_date")
    if created is None:
        return "No PO Line created date found"
    # Drop the first two characters (the century), then keep only the
    # digit characters.
    return "".join(ch for ch in created[2:] if ch.isdigit())
def sha256(obj: bytes) -> bytes:
    """
    Hash a bytes object with SHA3-256 (note: despite the function name,
    this is SHA3-256, not SHA-2's SHA-256).
    :param obj: bytes object
    :return: the 32-byte digest
    """
    from hashlib import sha3_256
    return sha3_256(obj).digest()
def cpf_has_correct_length(value):
    """
    Check whether a Brazilian CPF string has the expected length.

    :param value: A string with the number of Brazilian CPF
    :return: True if the length of value is 11, False otherwise.
    """
    # Return the comparison directly instead of an if/else that
    # returns literal booleans.
    return len(value) == 11
def resource_workspace_required(account_type):
    """Indicate whether a given account type requires a Terraform
    workspace for resources (case-insensitive check)."""
    return account_type.lower() in ("application", "sandbox")
def get_dist_prob_wout(t1, t2, w1, w2, decay_factor):
    """
    Compute p_dist * w_dist: the weight on the earlier side (w1 when
    t1 < t2, otherwise w2) is decayed before combining.
    """
    first, second = w1, w2
    if t1 < t2:
        first = first * decay_factor
    else:
        second = second * decay_factor
    return 1./6. * (first + second + 4 * first * second)
def _readlines(fname, fpointer1=open, fpointer2=open):
"""Read all lines from file."""
# fpointer1, fpointer2 arguments to ease testing
try: # pragma: no cover
with fpointer1(fname, "r") as fobj:
return fobj.readlines()
except UnicodeDecodeError: # pragma: no cover
with fpointer2(fname, "r", encoding="utf-8") as fobj:
return fobj.readlines() |
def _format_text(text, args):
"""
Format text.
"""
if args:
return text % args
return str(text) |
def to_hash(string):
    """Hash a string by summing the ordinal values of its characters.

    Note: no modulo / table size is applied here (the previous
    docstring's mention of ``t_size`` described code that does not
    exist); the raw ordinal sum is returned and callers must reduce it
    themselves if bucketing is needed.

    Parameters
    ----------
    string : str

    Returns
    -------
    int
        Sum of ord(c) over the characters (0 for the empty string).
    """
    # Iterate characters directly instead of indexing by position.
    return sum(ord(ch) for ch in string)
def arg_eval(arg):
    """Evaluate an argument string into a Python value, falling back to
    the raw string when it does not evaluate.

    SECURITY NOTE(review): ``eval`` executes arbitrary code — never call
    this with untrusted input. The bare ``except`` also swallows
    everything (including KeyboardInterrupt); flagged for follow-up.
    """
    try:
        return eval(arg)
    except: # If it doesn't evaluate to a real python type assume it's a string.
        return arg
def female_filter(variable):
    """
    Simple heuristic to guess gender from a name's ending: names ending
    in 'a' (case-insensitive) are treated as female.

    Parameters
    ----------
    variable : str
        The name to test.

    Returns
    -------
    bool
        True when the name ends with 'a'/'A'; False otherwise,
        including for the empty string (which raised IndexError in the
        original ``variable[-1]`` access).
    """
    return bool(variable) and variable[-1].lower() == 'a'
def get_work_position(hrp_wkr, position_num):
    """Return the worker position at the given index.

    :param hrp_wkr: worker object with ``primary_position`` and
        ``other_active_positions`` attributes (may be None)
    :param position_num: [0..]; 0 selects the primary position, values
        >= 1 index into the other active positions (1-based)
    :return: the selected position, or None when unavailable
    """
    if hrp_wkr is None:
        return None
    if position_num == 0:
        # Falls through to None when there is no primary position.
        return hrp_wkr.primary_position
    if position_num >= 1:
        offset = position_num - 1
        if offset < len(hrp_wkr.other_active_positions):
            return hrp_wkr.other_active_positions[offset]
    return None
def byte_size(number=0):
    """
    Return the number of bytes this number fits in.

    Scans candidate bit widths from 256 down in steps of 8 and reports
    the first width the value still exceeds, plus one byte (minimum 1).
    """
    for bits in range(256, 0, -8):
        if int(number / (2 ** bits)) > 0:
            return bits // 8 + 1
    return 1
def lowercase_set(sequence):
    """Create a set from *sequence* with every entry lower-cased."""
    return {entry.lower() for entry in sequence}
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.
    Does not mutate hand or wordList.
    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    if not word or word not in wordList:
        return False
    # Consume letters from a private copy so the caller's hand is
    # untouched; fail as soon as a letter is missing or used up.
    remaining = hand.copy()
    for letter in word:
        if letter not in remaining or remaining[letter] == 0:
            return False
        remaining[letter] -= 1
    return True
def copy_model_post_fn(line):
    """Collapse repeated characters in a line's final token, so that a
    duplicated sentence end like "!!!!" becomes "!".
    :param line: input sentence
    :return: the line with its last token de-duplicated
    """
    tokens = line.strip().split(" ")
    if len(tokens) == 0:
        return ""
    # dict preserves insertion order, so fromkeys keeps the first
    # occurrence of each character in order.
    tokens[-1] = "".join(dict.fromkeys(tokens[-1]))
    return " ".join(tokens)
def format_bucket_prefix(base_prefix: str, dirname: str) -> str:
    """Join a base S3 key prefix with a directory name, ensuring the
    result ends with a trailing slash and carries no leading slash.
    """
    cleaned_base = base_prefix.strip("/")
    cleaned_dir = dirname.lstrip("/")
    joined = f"{cleaned_base}/{cleaned_dir}"
    return joined if joined.endswith("/") else joined + "/"
def table_entry_size(name, value):
    """
    Size of a single HPACK table entry: the lengths of its name and
    value plus a fixed 32-byte overhead.

    This size is mostly irrelevant to us; the 32 extra bytes are the
    "maximum" per-entry bookkeeping overhead defined by RFC 7541
    Section 4.1 to make memory accounting uniform across lower-level
    implementations.
    """
    return len(name) + len(value) + 32
def urljoin(*args):
    """
    Join the given arguments into a URL. Trailing (but not leading)
    slashes are stripped from each argument.
    """
    return "/".join(str(part).rstrip('/') for part in args)
def _clean_qp(query_params):
"""
Strip 'event.' prefix from all query params.
:rtype : QueryDict
:param query_params: dict self.request.query_params
:return: QueryDict query_params
"""
query_params = query_params.copy() # do not alter original dict
nspace = 'event.'
for key in query_params.keys():
if key.startswith(nspace):
new_key = key[len(nspace):]
# .pop() returns a list(?), don't use
# query_params[new_key] = query_params.pop(key)
query_params[new_key] = query_params[key]
del query_params[key]
return query_params |
def prepare_query(whois_server, domain):
    """
    Build a query string appropriate for the given WHOIS server, since
    some WHOIS servers expect a server-specific query format.
    :param whois_server: hostname of the WHOIS server being queried
    :param domain: The domain to query
    :return: The fitting query
    """
    if whois_server == "whois.jprs.jp":
        # Suppress Japanese output
        return "%s/e" % domain
    if domain.endswith(".de") and whois_server in ("whois.denic.de", "de.whois-servers.net"):
        # regional specific stuff
        return "-T dn,ace %s" % domain
    if whois_server == "whois.verisign-grs.com":
        # Avoid partial matches
        return "=%s" % domain
    return domain
def modif(res):
    """
    From a "Result" entry, get the modifier id string.

    Returns "letter" when no "@With" key is present; otherwise maps the
    known virtual-key combinations to their modifier names, and "" for
    an unrecognised combination.
    """
    if "@With" not in res:
        return "letter"
    vk_to_modifier = {
        "VK_NUMLOCK": "numpad",
        "VK_SHIFT": "shift",
        "VK_CONTROL VK_MENU": "altgr",
        "VK_MENU": "alt",
    }
    return vk_to_modifier.get(res["@With"], "")
def getPassTests(skus, items):
    """Validate raw input: skus must be a string whose characters are
    all members of *items*.

    Parameters
    ----------
    skus : str
        Candidate SKU string.
    items : collection
        Valid SKU characters.

    Returns
    -------
    bool
        True only when skus is a string of valid characters.
    """
    # check string — return early: the original fell through to
    # set(skus), which raises TypeError for non-iterable input instead
    # of reporting a failed test.
    if not isinstance(skus, str):
        return False
    # check valid skus
    return set(skus).issubset(items)
def parse_message_body(message_body):
    """Build a datetime string from the schedule fields of a message
    body: "year-month-day hour:minute:00 tz_offset" (components are not
    zero-padded)."""
    schedule = message_body['schedule']
    tz_offset = message_body['tz_offset']
    return (f"{schedule['year']}-{schedule['month']}-{schedule['day']} "
            f"{schedule['hour']}:{schedule['minute']}:00 {tz_offset}")
def choseNextBranch(branchesVisited,node,previousBranch,reverseOrder):
    """
    Returns the next branch that has not already been used in this direction,
    at node 'node' and coming from branch 'previousBranch'.
    The default order is clockwise but if 'reverseOrder' is True, the order is
    counter-clockwise.

    NOTE(review): when every branch after 'previousBranch' has been
    visited, the loop falls through and the function implicitly returns
    None — confirm callers handle that case.
    """
    started = False
    # Walk up to two full turns around the node's branches: the first
    # pass locates 'previousBranch', the wrap-around pass scans past it.
    for k in range( 2*len(branchesVisited[node]) + 1 ):
        if reverseOrder:
            # Negating k reverses the walking direction; the modulo
            # below maps negative indices back into range.
            k = -k
        b = k % len(branchesVisited[node])
        # Only branches encountered after 'previousBranch' are eligible.
        if started and (not branchesVisited[node][b]):
            return b
        if b == previousBranch:
            started = True
def getZones(gdict):
    """
    Collect the zones appearing in an OD (origin-destination) file.

    Parameters
    ----------
    gdict: a demand dictionary keyed by (source, target) pairs

    Returns
    -------
    set
        The zones (all sources and targets) of the network.
    """
    zones = set()
    for source, target in gdict.keys():
        zones.add(source)
        zones.add(target)
    return zones
def check_uniqueness_in_rows(board: list):
    """
    Check buildings of unique height in each row.
    Return True if buildings in a row have unique length, False otherwise.
    Only the interior cells (excluding the first/last row and column,
    which hold hints) are examined; '*' cells are ignored.
    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_uniqueness_in_rows(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    False
    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*553215', '*35214*', '*41532*', '*2*1***'])
    False
    """
    for row in board[1:-1]:
        interior = row[1:-1]
        for cell in interior:
            if cell != "*" and interior.count(cell) != 1:
                return False
    return True
def astype(records, data_type):
    """A Pandas astype-like function.
    Args:
        records (list): A 1-D list.
        data_type (data type): The target data type.
    Returns:
        list: The converted records.
    Examples:
        >>> l = ['1', '2', '3']
        >>> astype(l, int)
    """
    return list(map(data_type, records))
def IsLeapYear(year):
    """Returns 1 if year is a leap year, zero otherwise.

    Gregorian rule: divisible by 4, except centuries, which must be
    divisible by 400.
    """
    if year % 400 == 0:
        return 1
    if year % 100 == 0:
        return 0
    if year % 4 == 0:
        return 1
    return 0
def author_id_string(aob):
    """
    Produce a "type: id" string representation of an author id.
    :param aob: author object (dict-like with "type" and "id" keys)
    :return: string representation of author id
    """
    author_type = aob.get("type")
    author_id = aob.get("id")
    return f"{author_type}: {author_id}"
def get_threshold(train_step):
    """
    Returns p (in (0,1)) such that the optimal action is taken with
    probability p. The policy is Greedy in the limit of infinity
    (p -> 1 as train_step grows).
    Parameters:
        train_step: The iteration index
    Returns:
        p: scalar
    """
    step = float(train_step)
    if step == 0:
        return 0
    scaled = step / 5
    return scaled / (1 + scaled)
def _versionTuple(versionStr):
"""Returns a tuple of int's (1, 81, 3) from a string version '1.81.03'
Tuples allow safe version comparisons (unlike strings).
"""
try:
v = (versionStr.strip('.') + '.0.0.0').split('.')[:3]
except (AttributeError, ValueError):
raise ValueError('Bad version string: `{}`'.format(versionStr))
return int(v[0]), int(v[1]), int(v[2]) |
def _phred_char_to_pval(ch):
""" Transforming ASCII character in the Phred scale to the error rate"""
return 10 ** (-(float(ord(ch)) - float(33)) / 10) |
def monitor_channel_event(message_type, channel_id, comments={}):
    """
    Build a channel-monitoring event message.

    :param message_type: non-empty message type identifier
    :param channel_id: which channel name will be used to monitor
    :param comments: optional free-form metadata dict (never mutated
        here, so the mutable default is harmless)
    :return: message dict with 'messageType', 'playload' and 'comments'

    NOTE(review): the key 'playload' looks like a typo for 'payload',
    but it is part of the message format consumers read — renaming it
    would be a breaking change, so it is kept and documented as-is.
    """
    assert message_type, 'Invalid message_type<{}>.'.format(message_type)
    assert channel_id, 'Invalid channel_id<{}>.'.format(channel_id)
    return {
        'messageType': message_type,
        'playload': channel_id,
        'comments': comments
    }
def lmap(*args, **kwargs):
    """Shortcut: map(...) materialised into a list."""
    return [*map(*args, **kwargs)]
def question_counts(counts, counts_validity, section_count=2):
    """
    Parse per-section question counts and their validity flags.

    counts: 112,212 => [[1, 1, 2], [2, 1, 2]]
    counts_validity: TTF,TTF => [[True, True, False], [True, True, False]]
    counts: 112 => [[1, 1, 2], [1, 1, 2]]
    counts_validity: None => [[True, True, True], [True, True, True]]
    Args:
        :param counts: A string which shows question counts and required submissions
        :param counts_validity: matching 'T'/'F' string(s), or None.
            NOTE(review): None is only handled in the single-spec
            branch — a comma-separated `counts` combined with
            counts_validity=None raises AttributeError; confirm callers
            never pass that combination.
        :param section_count: number of sections a single (comma-free)
            spec is replicated across
    """
    if ',' in counts:
        # Comma-separated: one spec per section, parsed pairwise with
        # the validity string.
        return [[int(c) for c in sec] for sec in counts.split(',')],\
            [[v == 'T' for v in sec] for sec in counts_validity.split(',')]
    else:
        # Single spec: replicate the same counts/validity for each section.
        validity = [True] * len(counts) if counts_validity is None\
            else [v == 'T' for v in counts_validity]
        return [[int(c) for c in counts]] * section_count, [validity] * section_count
def cipher(text, shift, encrypt=True):
    """
    Caesar cipher over 'a-zA-Z': each alphabetic character of *text* is
    shifted *shift* positions within the 52-letter alphabet (wrapping
    around); all other characters pass through unchanged. Setting
    encrypt=False shifts in the opposite direction.

    Examples
    --------
    >>> example = 'Apple'
    >>> encrypting = cipher(example, 1, )
    >>> print(encrypting)
    Bqqmf
    >>> decrypting = cipher(encrypting, 1, False)
    >>> print(decrypting)
    Apple
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    result = []
    for ch in text:
        pos = alphabet.find(ch)
        if pos == -1:
            # Non-alphabetic characters are passed through untouched.
            result.append(ch)
            continue
        offset = shift if encrypt == True else -shift
        result.append(alphabet[(pos + offset) % len(alphabet)])
    return ''.join(result)
def fragment_remover(smi):
    """
    Keep the largest covalent unit of a multi-fragment SMILES string.

    Fragments (salts, counter-ions, solvents, ...) are separated by '.'
    in a SMILES, e.g. 'NCCO.NC(=O)c1ccc2...' or
    '[Na+].COc1nc(...)c4c(Cl)cccc4Cl'. The fragment with the longest
    SMILES text is kept (only string length is compared; ties are
    resolved arbitrarily by max()). Stereo markers ('@') are stripped
    from the result.
    """
    if '.' in smi:
        fragments = smi.split('.')
        smi = max(fragments, key=len)
    return smi.replace('@', '')
def normalize_filename(filename):
    """Normalize a file name (or URL) down to its bare file name.

    Takes the last path component and drops any query string, which
    also extracts a dataset name from a download URL.

    Args:
        filename (str): The name of the file (possibly a URL).
    Returns:
        str: The normalized filename.
    """
    last_component = filename.rsplit('/', 1)[-1]
    return last_component.partition('?')[0]
def filter_tweet(tweet, required_phrases, avoid_phrases=[]):
    """
    Keep a tweet only if its text contains at least one required phrase
    and none of the avoid phrases.
    Parameters
    ----------
    required_phrases: list of str
        list of required phrases
    avoid_phrases: list of str
        list of phrases that must not be in a relevant tweet.
    Returns
    -------
    bool
        True when the tweet passes both filters.
    """
    text = tweet['text']
    # Reject when no required phrase appears.
    if not any(phrase in text for phrase in required_phrases):
        return False
    # Reject when any avoid phrase appears.
    if any(phrase in text for phrase in avoid_phrases):
        return False
    return True
def get_feat_dic(fea_array):
    """Build a feature dictionary mapping each row's first element to
    the remainder of the row."""
    return {row[0]: row[1:] for row in fea_array}
def __getdelta ( transfers ,
                 z,
                 omegabh2 ,
                 omegach2 ,
                 omeganuh2 ,
                 H0 ):
    """
    Compute baryon / CDM / neutrino densities at redshift z from the
    physical density parameters (omega*h^2) and H0.

    NOTE(review): this function currently always returns 0; the
    computed densities (rhob, rhocdm, rhonm) and the 'transfers'
    argument are unused, so it appears to be an unfinished stub —
    confirm intent before relying on it.
    """
    # Dimensionless Hubble parameter.
    h = H0/100.
    # Convert physical densities (omega*h^2) to density parameters.
    Omegab = omegabh2 / h / h
    Omegac = omegach2 / h / h
    Omegan = omeganuh2 / h / h
    # Scale densities to redshift z via (1+z)^3.
    rhob = Omegab* (1. + z )**3.0
    rhocdm = Omegac* (1. + z)**3.0
    rhonm = Omegan * (1. + z) **3.0
    return 0
def fibonacci_at_position(position):
    """Return the Fibonacci number at the given position (F(0) = 0,
    F(1) = 1)."""
    a, b = 0, 1
    for _ in range(position):
        a, b = b, a + b
    return a
def clues_pay(text: str) -> bool:
    """Check whether *text* mentions payment-related keywords."""
    lowered = text.lower()
    return any(clue in lowered for clue in ("credits", "paym", "expired", "exceeded"))
def getname(url):
    """Extract the bare username from an Instagram profile URL."""
    for fragment in ("https://", "www.", "instagram.com/", "/"):
        url = url.replace(fragment, "")
    return url
def clip1d(tar_x, tar_w, src_w):
    """
    Clip a source interval of width src_w placed at position tar_x in
    the coordinates of a target of width tar_w.

    Example: tar_x = -1, tar_w = 5, src_w = 4
       ssss
        ttttt

    Returns (tar_l, src_l, width): the clipped left edge in target
    coordinates, the corresponding left edge in source coordinates, and
    the overlap width — or (None, None, None) when the source lies
    entirely outside the target.
    """
    src_l = 0
    src_r = src_w
    tar_l = tar_x
    tar_r = tar_x + src_w
    # If target is off to the left then bound tar_l to 0 and push src_l over
    if tar_l < 0:
        src_l = -1 * tar_l
        tar_l = 0
        # This may cause src_l to go past its own width
    # If src_l went past its own width (from the above)
    if src_l >= src_w:
        # src l went past its own width
        return None, None, None
    # If the target went over the target right edge
    # 01234
    #    tttt
    # ssss
    if tar_r >= tar_w:
        src_r -= tar_r - tar_w
        # This may cause src_r to be less than the left
    if src_r - src_l <= 0:
        return None, None, None
    return tar_l, src_l, src_r - src_l
def p1_while(input_list):
    """Compute the sum of the numbers in *input_list* using a while loop.

    Parameters
    ----------
    input_list : list of numbers

    Returns
    -------
    The sum of the elements (0 for an empty list).
    """
    out = 0
    count = 0
    while count < len(input_list):
        out += input_list[count]
        # Advance the cursor — the original omitted this, so any
        # non-empty list caused an infinite loop.
        count += 1
    return out
def parse_cluster_namespaces_output(get_namespaces_output):
    """
    Parse the stdout of 'kubectl get namespaces'.

    Returns a list of dicts describing the namespaces, e.g.
    [{'name': 'some_namespace', 'status': 'active', 'age': '5h20m'}].

    The header (first) line is validated first: its first three columns
    must be NAME, STATUS and AGE; an exception is raised if that
    structure is ever changed by the kubectl developers.

    :param get_namespaces_output: 'kubectl get namespaces' output as String
    :return: list of namespace dicts
    """
    available_namespaces = []
    # split get_namespaces_output by 'new line'
    lines = get_namespaces_output.split("\n")
    # Perform validation on stdout of first (0) line
    header = lines[0].split()
    if header[0].strip() != 'NAME' or \
            header[1].strip() != 'STATUS' or \
            header[2].strip() != 'AGE':
        raise Exception("'kubectl get namespaces' command output changed, "
                        "code change is needed to resolve this issue, "
                        "contact the developer.")
    # For every line after the header, parse the whitespace-delimited
    # NAME/STATUS/AGE columns.
    for line in lines[1:]:
        namespace_details = line.split()
        # Skip blank lines (e.g. the trailing newline kubectl emits);
        # indexing an empty split raised IndexError in the original.
        if not namespace_details:
            continue
        available_namespaces.append({
            'name': namespace_details[0].strip(),
            'status': namespace_details[1].strip(),
            'age': namespace_details[2].strip(),
        })
    return available_namespaces
def _check_num(s):
"""
Checks if string is a number
"""
try:
float(s)
return True
except ValueError:
return False |
def create_html_table(data: list) -> str:
    """Render a list of dicts as an HTML table whose columns are sorted
    by column name (taken from the first row's keys).
    Parameters
    ----------
    data : list
        A list of dicts, where each dict has "col_name": value pairs
    Returns
    -------
    str
        An HTML table
    """
    columns = sorted(data[0])
    # Header row.
    header_cells = "".join(f"<th>{col}</th>" for col in columns)
    rows = [f"<tr>{header_cells}</tr>"]
    # One row per record, cells in column order.
    for record in data:
        cells = "".join(f"<td>{record[col]}</td>" for col in columns)
        rows.append(f"<tr>{cells}</tr>")
    return f"<table>{''.join(rows)}</table>"
def fast_exponentiation(base, exp, n):
    """
    Square-and-multiply computation of (base**exp) mod n, scanning the
    exponent's binary digits from most to least significant.
    """
    result = 1
    for bit in bin(exp)[2:]:
        result = (result * result) % n
        if bit == "1":
            result = (result * base) % n
    return result
def from_settings(settings):
    """Return a dict created from application settings.
    Args:
        settings (dict): An application's settings.
    Returns:
        dict: The database-specific settings (keys with the
        'DATABASE_' prefix stripped and lower-cased), formatted to use
        with :func:`connection_url`.
    """
    prefix = 'DATABASE_'
    db_settings = {}
    for key, value in settings.items():
        if key.startswith(prefix):
            db_settings[key.replace(prefix, '', 1).lower()] = value
    return db_settings
def validate_data_keys(keyset_occurences):
    """Return the most plausible valid keyset as a list.

    The last keyset with the highest occurrence count wins (ties go to
    the later entry in iteration order); its elements are expanded into
    a list.
    """
    best_keyset = ()
    highest = 0
    for keyset, count in keyset_occurences.items():
        if count >= highest:
            highest = count
            best_keyset = keyset
    return list(best_keyset)
def average_gradients(grads_list):
    """
    Averages gradients coming from distributed workers.
    Parameters
    ----------
    grads_list : list of lists of tensors
        List of actor gradients from different workers.
    Returns
    -------
    avg_grads : list of tensors
        Averaged actor gradients. Slots where the first worker's
        gradient is None are filled with the scalar 0.0 instead of a
        tensor.
    """
    # For each gradient slot, sum that slot across workers and divide
    # by the worker count.
    # NOTE(review): only grads_list[0] is checked for None — this
    # assumes all workers have Nones in the same positions; confirm
    # that holds upstream.
    avg_grads = [
        sum(d[grad] for d in grads_list) / len(grads_list) if
        grads_list[0][grad] is not None else 0.0
        for grad in range(len(grads_list[0]))]
    return avg_grads
def file_type(filename, stream=False):
    """Detect a potentially compressed file by its magic number.

    Returns 'gz', 'bz2' or 'zip' if a compression signature is
    detected, else None.

    :param filename: path to the file, or (when stream=True) the raw
        leading bytes of the data itself
    :param stream: when True, treat *filename* as in-memory bytes
        instead of a path
    """
    # Magic numbers are raw bytes. The original kept them as str
    # literals and opened the file in text mode — a Python-2 leftover
    # that raises UnicodeDecodeError on real compressed files under
    # Python 3 and can never match the decoded text.
    magic_dict = {
        b"\x1f\x8b\x08": "gz",
        b"\x42\x5a\x68": "bz2",
        b"\x50\x4b\x03\x04": "zip",
    }
    max_len = max(len(magic) for magic in magic_dict)
    if not stream:
        # Binary read, and close the handle deterministically.
        with open(filename, "rb") as f:
            file_start = f.read(max_len)
        for magic, filetype in magic_dict.items():
            if file_start.startswith(magic):
                return filetype
    else:
        for magic, filetype in magic_dict.items():
            if filename[:len(magic)] == magic:
                return filetype
    return None
def binary_string_to_int(string):
    """
    >>> binary_string_to_int('10111')
    23
    """
    # Any character other than '1' contributes 0, matching the
    # original's lenient behaviour (so deliberately not int(string, 2)).
    return sum(2 ** i for i, c in enumerate(reversed(string)) if c == '1')
def est_null_prop_continous(pvalues):
    """
    Estimate the null proportion from a list of p-values, capped at 1.
    Equation 6 and 7 in Pounds et al., Bioinformatics 2006.
    """
    folded = [2 * min(p, 1 - p) for p in pvalues]
    return min(1, 2 * sum(folded) / len(folded))
def async_get_pin_from_uid(uid):
    """Get the device's 4-digit PIN from its UID (the trailing four
    characters)."""
    pin = uid[-4:]
    return pin
def list_extra_bonds(num_monos, bondstruc="centromere"):
    """
    Build the list of extra bonds to add.

    The "centromere" option assumes chains were constructed with
    "segregation" and links the quarter and three-quarter monomers
    with a single harmonic bond; any other value yields no extra bonds.
    """
    if bondstruc != "centromere":
        # none.
        print("bondstruc is none, input was", bondstruc)
        return []
    print("bondstruc is centromere")
    return [{"i": num_monos // 4, "j": 3 * num_monos // 4,
             "bondType": "Harmonic"}]
def step_learning_rate(base_lr, epoch, step_epoch, multiplier=0.1):
    """Decay base_lr by *multiplier* once every *step_epoch* epochs."""
    decay_steps = epoch // step_epoch
    return base_lr * multiplier ** decay_steps
def singleline_diff_format(line1, line2, idx):
    """
    Inputs:
      line1 - first single line string
      line2 - second single line string
      idx - index at which to indicate difference
    Output:
      Returns a three line formatted string showing the location
      of the first difference between line1 and line2.
      Returns an empty string when either line contains a newline or
      carriage return, or when idx is not a valid index into the
      shorter line.
    """
    # Guard: neither line may contain line-break characters.
    if "\n" in line1 or "\r" in line1 or "\n" in line2 or "\r" in line2:
        return ""
    shorter = line1 if len(line1) <= len(line2) else line2
    # Guard: idx must index into (or sit just past) the shorter line.
    if not 0 <= idx <= len(shorter):
        return ""
    marker = "=" * idx + "^"
    return f"{line1}\n{marker}\n{line2}\n"
def fibo_number(n: int) -> int:
    """Returns the n-th Fibonacci number.
    Arguments:
    n -- The index of the desired number in the Fibonacci sequence.
    Returns:
    The n-th Fibonacci number.
    Raises:
    TypeError -- if n is not an int.
    ValueError -- if n is negative.
    """
    if not isinstance(n, int):
        raise TypeError('Parameter n must an integer')
    if n < 0:
        raise ValueError('Parameter n must be non-negative')
    # Iterative computation: the original double recursion took
    # exponential time; this is O(n) with identical results.
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
    return previous
def find_python_traceback(lines):
    """Scan a log file or other iterable for a Python traceback,
    and return it as a list of lines; returns None when no traceback
    header is found.
    In logs from EMR, we find python tracebacks in ``task-attempts/*/stderr``

    NOTE(review): the inner loop keeps consuming the *same* iterator,
    which is correct for file objects / generators; if a plain list is
    passed, the inner ``for`` restarts from the beginning instead —
    confirm callers pass an iterator.
    """
    for line in lines:
        if line.startswith('Traceback (most recent call last):'):
            tb_lines = []
            # Collect the indented frames plus the final unindented
            # exception line, then stop.
            for line in lines:
                tb_lines.append(line)
                if not line.startswith(' '):
                    break
            return tb_lines
    else:
        return None
def format_value(name, value):
    """
    Attempts to format the value as a float (if it has a "." in it) or an integer,
    otherwise returns the original value.

    Values whose field name contains "name" are never converted.
    """
    if "name" in name:
        return value
    # A "." marks a float candidate; anything else is tried as an int.
    converter = float if "." in value else int
    try:
        return converter(value)
    except ValueError:
        # Not numeric after all -- hand back the original value untouched.
        return value
def simple_divide(item, denom):
    """
    :param item: element of list of numbers
    :param denom: integer, element at specific index of list of numbers
    :return: float
    """
    try:
        quotient = item / denom
    except ZeroDivisionError:
        # Division by zero is mapped to 0 rather than propagating;
        # any other exception is deliberately left unhandled.
        return 0
    return quotient
def partition_by_suffix(iterable, func):
    """ Given an iterable and a boolean-valued function which takes in
    elements of that iterable, outputs a list of lists, where each list
    ends in an element for which the func returns true, (except for the
    last one)
    e.g.
    iterable := [1, 2, 3, 4, 5,5, 5]
    func := lambda x: (x % 2) == 0
    returns [[1,2], [3,4], [5, 5, 5]]
    """
    partitions = []
    current = []
    for item in iterable:
        current.append(item)
        if func(item):
            # This element closes the current partition.
            partitions.append(current)
            current = []
    # Flush any trailing elements that never hit a closing element.
    if current:
        partitions.append(current)
    return partitions
def branch_population(merged_population):
    """
    create an unmerged population representation

    Splits *merged_population* into two lists keyed by each instance's
    ``feasibility`` flag, preserving the original order within each group.
    """
    branched = {"feasible": [], "infeasible": []}
    for instance in merged_population:
        bucket = "feasible" if instance.feasibility else "infeasible"
        branched[bucket].append(instance)
    return branched
def mesh_ratio(N):
    """
    Calculates the mesh refinement ratio between consecutive meshes.

    Arguments:
    ----------
    N: list, Number of elements / avg_density in test (depends on what applies).

    Returns:
    --------
    mesh_ratio: list of float, mesh refinement ratio between consequtive meshes.
    """
    # Pair each mesh size with its successor and divide.
    return [nxt / cur for cur, nxt in zip(N, N[1:])]
def get_names(gname, uid_names, annotations):
    """
    return the names of the gene

    Arguments:
    - `gname`: a gene EcoCyc accession number (indexed like a sequence:
      [0] primary UID, optional [1] second UID/suffix, optional [2]
      region tag 'IGR', 'TU' or 'TU_AS') -- assumed sequence-like;
      TODO confirm exact shape against callers
    - `uid_names`: mapping of UID -> record dict with a 'COMMON-NAME' key
    - `annotations`: mapping of common name -> description string

    Returns:
    - (name, description) pair; '-' marks a missing description.
    """
    # Resolve the primary UID to its common name; on failure fall back
    # to the raw UID with no description.
    try:
        p0_name = uid_names[gname[0]]['COMMON-NAME']
    except KeyError:
        p0_name = gname[0]
        p0_desc = '-'
    else:
        # UID resolved: look up its annotation, defaulting to '-'.
        try:
            p0_desc = annotations[p0_name]
        except KeyError:
            p0_desc = '-'
    # Region entries (intergenic region / transcription unit) reference a
    # second gene: append its name and the tag, and merge descriptions.
    if len(gname)>2 and (gname[2]=='IGR' or gname[2]=='TU' or\
       gname[2]=='TU_AS'):
        try:
            cn2 = uid_names[gname[1]]['COMMON-NAME']
        except KeyError:
            cn2 = gname[1]
        p0_name += '.%s.%s'%(cn2, gname[2])
        try:
            p1_desc = annotations[cn2]
        except KeyError:
            p1_desc = '-'
        p0_desc += ' : %s'%p1_desc
    elif len(gname)>=2:
        # Two-element name with no recognized region tag: plain suffix.
        p0_name += '.%s'%gname[1]
    return p0_name, p0_desc
def bytes_int(seq):
    """8-bit encoded integer as sequence of 1 to 4 bytes, little-endian.
    """
    # Byte at position i contributes its value shifted left by 8*i bits.
    return sum(byte << (8 * pos) for pos, byte in enumerate(seq))
def check_punctuation(words):
    """Return the punctuation tokens (".", ",", "?") found in *words*,
    in their original order."""
    recognized = (".", ",", "?")
    found = []
    for token in words:
        if token in recognized:
            found.append(token)
    return found
def digit_present(s):
    """
    Returns true for entities that have no common/scientific name in wikidata
    and get assigned something like 't307596644'.

    True iff at least one character of *s* is a digit.
    """
    for ch in s:
        if ch.isdigit():
            return True
    return False
def stripLabels(testFeatures):
    """
    Strips label from a test sentence feature vector

    Returns a list of the first element of each feature entry, in order.
    (Iterates the sequence directly instead of indexing via
    range(len(...)).)
    """
    return [features[0] for features in testFeatures]
def search_dict_of_lists(value, dictionary):
    """
    Search through a dictionary of lists for a given value.
    Parameters
    ----------
    value: Any data-type.
        The value that we are searching for in the lists.
    dictionary: Dictionary of lists.
        A dictionary of lists we're searching through.
    Returns
    ----------
    True
        If the value is in the dictionary.
    False
        Otherwise.
    Examples
    ----------
    >>> my_dict = {'People' : ['John', 'Mary', 'Joseph'],
    ...            'Age' : [21, 8, 87],
    ...            'Height' : [186.4, 203.1, 87.8]}
    >>> search_dict_of_lists("John", my_dict)
    True
    >>> search_dict_of_lists("Carol", my_dict)
    False
    >>> search_dict_of_lists(87, my_dict)
    True
    >>> search_dict_of_lists(5, my_dict)
    False
    >>> search_dict_of_lists(186.4, my_dict)
    True
    >>> search_dict_of_lists(186.9, my_dict)
    False
    """
    # Iterate the values directly (no need for keys), short-circuiting
    # on the first list that contains the value.
    return any(value in values for values in dictionary.values())
def convert_to_float(input_list):
    """Convert list of str to list of float"""
    converted = []
    try:
        for item in input_list:
            converted.append(float(item))
    except ValueError:
        # NOTE(review): on failure this returns an error-message *string*
        # instead of raising; preserved because callers may rely on it.
        return "input is not convertible to float."
    return converted
def get_sap_configs(n_samples, val_per_factor, continuous):
    """ Get SAP configs, See Generic function description on top of file for more details
    Extra args : continuous (bool) : defines the mode of the evaluated metric"""
    # 80/20 split of the available samples between train and test.
    n_train = int(n_samples * 0.8)
    n_test = int(n_samples * 0.2)
    gin_config_files = ["./disentanglement_lib/config/benchmark/metric_configs/sap_score.gin"]
    gin_bindings = [
        "sap_score.num_train = %d" % n_train,
        "sap_score.num_test = %d" % n_test,
        "sap_score.continuous_factors = %s" % continuous,
    ]
    return [[gin_config_files, gin_bindings]]
def InRange(val, valMin, valMax):
    """ Returns whether the value val is between valMin and valMax. """
    # Chained comparison: bounds are inclusive on both ends.
    return valMin <= val <= valMax
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.