content stringlengths 42 6.51k |
|---|
def toYelpScore(sentiment):
    """
    Map a normalized sentiment analysis polarity rating to the Yelp 5-star scale.
    Arguments:
    sentiment {Double} -- Polarity rating in [0, 1].
    Returns:
    Double -- Transformed polarity rating on the 1-5 star scale.
    """
    # Linear rescale: 0.0 -> 1 star, 1.0 -> 5 stars.
    return 1 + 4 * sentiment
def getPutDeltas(delta, optType):
    """
    Convert option deltas to their put-side (OTM) equivalents.
    delta: array or list of deltas
    optType: array or list of optType "C", "P"
    :return: list of put deltas (1 - delta for calls, abs(delta) for puts)
    """
    put_deltas = []
    for d, kind in zip(delta, optType):
        # Calls: complement of the delta; puts: magnitude of the delta.
        put_deltas.append(1 - d if kind == "C" else abs(d))
    return put_deltas
def linear(m0,m1,u,domain=None):
    """For completeness: linear interpolation between m0 and m1 at parameter u in [0,1].
    Alternatively, if `domain` != None, then this will use domain=(a,b) and
    remap u from [a,b] onto [0,1] first.
    """
    if domain is None:
        return m0 * (1.0 - u) + m1 * u
    a, b = domain
    return linear(m0, m1, (u - a) / (b - a))
def form_connection(target, match, selector, weblink='http://'):
    """
    Construct the perfect tweet to spark troll romance.

    Arguments:
        target: first user handle to link.
        match: second user handle to link.
        selector: unused here; kept for interface compatibility.
        weblink: URL explaining the bot's 'purpose'.
    Raises:
        ValueError: if the composed tweet exceeds the 140-character limit.
    """
    # Two options here, we can either link users without tagging specific tweets
    # or we can find a specific tweet to base the linkage on.
    # Current strategy: the bot masquerades as a 'factual accuracy' bot that
    # links users making mistakes to users knowledgeable about the subject,
    # without saying which is which -- so both trolls assume they are the
    # expert and shout at each other. No anchor tweet is referenced, since
    # that would reveal who we thought the incorrect user was.
    tweet = ("Hi, {user1} and {user2}! We think you'd benefit from talking "
             "to each other, see {weblink} for info.").format(
                 user1=target, user2=match, weblink=weblink)
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if len(tweet) > 140:
        raise ValueError("tweet exceeds 140 characters: %d" % len(tweet))
    return tweet
def multistage_growth_model(dividend, discount_rate, growth_rate, constant_growth_rate, periods):
    """
    Summary: Calculate the value of a stock using a multistage growth model.

    Dividends grow at `growth_rate` for `periods` periods; at the final
    period a Gordon-growth terminal value (perpetual growth at
    `constant_growth_rate`) replaces the plain cashflow.

    PARA dividend: The dividend earned over the life of the stock.
    PARA type: float
    PARA discount_rate: The discount rate used to calculate the NPV & PV calcs.
    PARA type: float
    PARA growth_rate: The growth rate during the multistage period.
    PARA type: float
    PARA constant_growth_rate: The growth rate in perpetuity.
    PARA type: float
    PARA periods: The number of periods to be calculated.
    PARA type: int
    """
    total_value= 0
    for period in range(1, periods + 1):
        # if it's the last period calculate the terminal value
        if period == periods:
            # calculate the terminal dividend.
            terminal_dividend = (dividend * (1 + growth_rate) ** period)
            # calculate the terminal value and then discount it.
            # Gordon growth: D / (r - g) with the perpetual growth rate.
            terminal_value = terminal_dividend / (discount_rate - constant_growth_rate)
            # NOTE(review): the terminal value is discounted over (period - 1)
            # periods, i.e. valued as of the start of the final period --
            # textbook formulations often discount over `period` periods;
            # confirm this exponent is intended.
            terminal_value_disc = terminal_value / (1 + discount_rate) ** (period -1)
            # return the total value of the stock
            total_value += terminal_value_disc
        # otherwise calculate the cashflow for that period
        else:
            cashflow = (dividend * (1 + growth_rate) ** period) / (1 + discount_rate) ** period
            total_value += cashflow
    return total_value
def separate_name_from_title(title_and_name: str) -> str:
    """
    Return just the name, with the leading title word removed.
    Parameters
    ----------
    title_and_name: str
        e.g. Mayor Ted Wheeler
    Returns
    -------
    name: str
        title_and_name with first word removed e.g. Ted Wheeler
    Notes
    -----
    first word in title_and_name is presumed to be title
    """
    # title_and_name holds the title (Mayor or Commissioner) plus the name of
    # a Portland City Commission member, e.g. "Mayor Ted Wheeler",
    # "Commissioner Carmen Rubio".
    parts = title_and_name.split(" ", 1)
    # No space at all -> nothing to strip; return the input unchanged.
    return parts[1] if len(parts) > 1 else title_and_name
def http_fix(url):
    """Try adding an http schema in front of the given URL."""
    return f"http://{url}"
def initial_chars_match(a, b, chars):
    """Determine if the first n characters of two strings are identical (case
    insensitive).
    Args:
        a: (str, utf-8) the first string to compare.
        b: (str, utf-8) the second string to compare.
        chars: (int) the number of characters to compare
    Returns:
        True if the characters match (case insensitive), else false
    """
    # Lower-case first, then slice (some Unicode chars change length when
    # lower-cased, so the order matters).
    prefix_a = a.lower()[:chars]
    prefix_b = b.lower()[:chars]
    return prefix_a == prefix_b
def pofiles_to_unique_translations_dicts(pofiles):
    """Extracts unique translations from a set of PO files.
    Given multiple pofiles, extracts translations (those messages with non
    empty msgstrs) into two dictionaries, a dictionary for translations
    with contexts and other without them.
    Args:
        pofiles (list): List of :py:class:`polib.POFile` objects.
    Returns:
        tuple: dictionaries with translations.
    """
    translations, translations_with_msgctxt = ({}, {})
    for pofile in pofiles:
        for entry in pofile:
            # Skip untranslated entries: the contract promises only messages
            # with non-empty msgstrs are extracted (the previous code stored
            # empty msgstrs too).
            if not entry.msgstr:
                continue
            if entry.msgctxt:
                translations_with_msgctxt.setdefault(
                    entry.msgctxt, {})[entry.msgid] = entry.msgstr
            else:
                translations[entry.msgid] = entry.msgstr
    return (translations, translations_with_msgctxt)
def MBtokb(megabytes):
    """ Converts megabytes to kilobits.
    :param megabytes: numeric, megabytes
    :return: numeric, kilobits equivalent (1 MB = 1024 kB * 8 bits = 8192 kb).
    """
    kilobits = 8192 * megabytes
    return kilobits
def str_to_bool(string):
    """Convert a `string` to bool value. Returns ``True`` if `string` is
    one of ``["true", "yes", "1", "on"]``, returns ``False`` if `string` is
    one of ``["false", "no", "0", "off"]``, otherwise returns ``None``
    (including for a ``None`` input)."""
    if string is None:
        return None
    lowered = string.lower()
    if lowered in ("true", "yes", "1", "on"):
        return True
    if lowered in ("false", "no", "0", "off"):
        return False
    return None
def lph2kgps(lph):
    """
    Volume to mass flow converter Liters/hour to kg/s.
    This converter works for **diesel fuel at room temperature only**
    (density 0.875 kg/l). Do not use for any other liquids without
    adjusting the conversion factor.
    The conversion is 1 l/h = 0.875/3600 kg/s.
    :param lph: liter per hour volume flow input
    :return kgps: kg per second mass flow output
    """
    kgps = (0.875/3600) * lph
    return kgps
def distance(strand_a, strand_b):
    """
    Return the Hamming distance between two DNA strands of equal length.

    Raises ValueError when the strands differ in length.
    """
    if len(strand_a) != len(strand_b):
        raise ValueError("Strands must be of equal length.")
    # Count positions where the strands disagree.
    return sum(1 for a, b in zip(strand_a, strand_b) if a != b)
def is_pipfile_requirement(line: str) -> bool:
    """
    >>> is_pipfile_requirement('isort = "==4.3.20"')
    True
    >>> is_pipfile_requirement('[dev-packages]')
    False
    """
    # A requirement line looks like: <name> = "<specifier>" -- exactly three
    # space-separated tokens and at least one '=' character.
    tokens = line.split(' ')
    return '=' in line and len(tokens) == 3
def to_numbers(message: str) -> list:
    """
    Turn every letter in the message into a number as specified in the
    statement ('A' -> 11, 'B' -> 12, ...).
    Given message must be in uppercase and should not contain whitespace.
    """
    numbers = []
    for letter in message:
        numbers.append(ord(letter) - 54)
    return numbers
def get_insert_many_query(table_name: str) -> str:
    """Build a SQL query to insert several RDF triples into a PostgreSQL table.
    Argument: Name of the SQL table in which the triples will be inserted.
    Returns: A prepared SQL query that can be executed with a list of tuples (subject, predicate, object).

    NOTE(review): `table_name` is interpolated directly into the SQL string,
    so it must never come from untrusted input (SQL injection risk) --
    confirm callers only pass trusted, internal table names.
    """
    return f"INSERT INTO {table_name} (subject,predicate,object) VALUES %s ON CONFLICT DO NOTHING"
def CharAppend(string, char, position=-1):
    """
    Insert `char` into `string` at index `position`.
    input:
        STRING - string to receive the new char
        CHAR - single char to add to the string
        POSITION - index to insert at; defaults to -1, i.e. just before the
                   last character
    output:
        STRING - string with the new char added at the position

    Bug fix: the previous implementation compared each character against
    string[position] and inserted before every *occurrence* of that
    character, producing multiple insertions when the string contained
    duplicates. Slicing inserts exactly once.
    """
    return string[:position] + char + string[position:]
def create_point(x, stats, default=0.0):
    """
    get the closest perf of time point x where timestamp <= x
    :param x:
        the time point
    :param stats:
        list of func. func is tuple of (timestamp list, perf list);
        timestamps are assumed sorted ascending.
    :param default:
        init value of perf, used when a func has no timestamp <= x
    :return:
        list of perf of funcs at time point x

    NOTE(review): points with timestamp == x are included (the scan only
    breaks on t > x), although the original summary said "timestamp < x".
    """
    perf_list = []
    for func in stats:
        timestamp, perf = func
        last_p = default
        # Walk forward until the first timestamp past x; keep the last perf.
        for t, p in zip(timestamp, perf):
            if t > x:
                break
            last_p = p
        perf_list.append(last_p)
    return perf_list
def _convert_millis_to_human_time(ms: int) -> str:
""" Convert milliseconds to the human readable format: hh:mm:ss. """
seconds = (ms / 1000) % 60
minutes = (ms / (1000 * 60)) % 60
hours = (ms / (1000 * 60 * 60)) % 24
# return int(hours), int(minutes), int(seconds)
time = "{0:02d}:{1:02d}:{2:02d}".format(int(hours), int(minutes), int(seconds))
return time |
def build_road_network(edges):
    """Construct the bidirectional road graph given a list of edges.

    Returns a dict mapping each node to the list of edges leaving it: every
    input edge is inserted forward (keyed on edge.start_node) and, via
    edge.reversed_edge(), backward (keyed on edge.end_node).
    """
    graph = {}
    # Graph with bidirectional edges
    for edge in edges:
        graph.setdefault(edge.start_node, []).append(edge)
        graph.setdefault(edge.end_node, []).append(edge.reversed_edge())
    return graph
def has_account(accessing_obj, accessed_obj, *args, **kwargs):
    """
    Only returns true if accessing_obj's `has_account` attribute is truthy,
    that is, this is an account-controlled object. It fails on actual
    accounts! This is a useful lock for traverse-locking Exits to restrain
    NPC mobiles from moving outside their areas.
    """
    # Missing attribute falls back to False, same as the hasattr guard.
    return getattr(accessing_obj, "has_account", False)
def shorten_name(name):
    """
    Shorten a name so that it fits within our standard 25-char limit.
    If it contains dots, then it'll be collapsed around them as necessary
    before possibly being truncated.
    >>> shorten_name("ThisIsAVeryVeryVeryVeryVeryLongName")
    'ThisIsAVeryVeryVeryVeryV~'
    >>> shorten_name("this.is.also.a.pretty.long.name")
    't.i.a.a.p.l.name'
    """
    if len(name) <= 25:
        return name
    if "." not in name:
        return name[:24] + "~"
    # Collapse each dotted part to its first letter, keeping the last intact.
    *heads, tail = name.split(".")
    collapsed = "".join(h[0] + "." for h in heads) + tail
    if len(collapsed) > 25:
        collapsed = collapsed[:24] + "~"
    return collapsed
def eq(a, b):
    """Check for equality between a and b.
    Return boolean.
    **** args ****
    a, b:
        python objects on which a test for equality will work
    """
    # bool() collapses the truthiness test that the original if/else did.
    return bool(a == b)
def parse_strandlist(structure):
    """ Parses the given strand-list structure specification, returning
    a dict of the bonds in the structure, keyed (strand_number, position). """
    return {
        (strand_num, pos): elem
        for strand_num, strand_struct in enumerate(structure)
        for pos, elem in enumerate(strand_struct)
    }
# Built once at import time rather than rebuilt on every call.
_US_STATE_ABBREV = {
    'Alabama': 'AL',
    'Alaska': 'AK',
    'American Samoa': 'AS',
    'Arizona': 'AZ',
    'Arkansas': 'AR',
    'California': 'CA',
    'Colorado': 'CO',
    'Connecticut': 'CT',
    'Delaware': 'DE',
    'District of Columbia': 'DC',
    'Florida': 'FL',
    'Georgia': 'GA',
    'Guam': 'GU',
    'Hawaii': 'HI',
    'Idaho': 'ID',
    'Illinois': 'IL',
    'Indiana': 'IN',
    'Iowa': 'IA',
    'Kansas': 'KS',
    'Kentucky': 'KY',
    'Louisiana': 'LA',
    'Maine': 'ME',
    'Maryland': 'MD',
    'Massachusetts': 'MA',
    'Michigan': 'MI',
    'Minnesota': 'MN',
    'Mississippi': 'MS',
    'Missouri': 'MO',
    'Montana': 'MT',
    'Nebraska': 'NE',
    'Nevada': 'NV',
    'New Hampshire': 'NH',
    'New Jersey': 'NJ',
    'New Mexico': 'NM',
    'New York': 'NY',
    'North Carolina': 'NC',
    'North Dakota': 'ND',
    'Northern Mariana Islands': 'MP',
    'Ohio': 'OH',
    'Oklahoma': 'OK',
    'Oregon': 'OR',
    'Pennsylvania': 'PA',
    'Puerto Rico': 'PR',
    'Rhode Island': 'RI',
    'South Carolina': 'SC',
    'South Dakota': 'SD',
    'Tennessee': 'TN',
    'Texas': 'TX',
    'Utah': 'UT',
    'Vermont': 'VT',
    'Virgin Islands': 'VI',
    'Virginia': 'VA',
    'Washington': 'WA',
    'West Virginia': 'WV',
    'Wisconsin': 'WI',
    'Wyoming': 'WY'
}


def stateToStateAbbr(state):
    """
    Convert a full US state/territory name to its lowercase two-letter
    abbreviation.

    Raises:
        KeyError: if `state` is not a recognized state/territory name.
    """
    return _US_STATE_ABBREV[state].lower()
def parseOpenPGPFingerprintRecord(value):
    """
    Extract fingerprint and URI from an OpenPGP fingerprint record's value.

    The record is a ';'-separated list of `key=value` parts; the last
    occurrence of each of "fpr=" and "uri=" wins.

    Returns:
        (fingerprint, uri) tuple. `uri` is None when absent; (None, None)
        when no fingerprint is present (a URI without a fingerprint is
        discarded).
    """
    fingerprint = None
    uri = None
    for part in value.split(";"):
        # startswith replaces the slice comparisons; a part cannot start
        # with both prefixes, so elif is safe.
        if part.startswith("fpr="):
            fingerprint = part[4:]
        elif part.startswith("uri="):
            uri = part[4:]
    # A URI is only meaningful alongside a fingerprint.
    if fingerprint is None:
        return None, None
    return fingerprint, uri
def subarray_defined(header):
    """Return True IFF SUBARRAY related keywords are defined.
    >>> header = dict(SUBARRAY="GENERIC",SUBSTRT1="1",SUBSTRT2="1",SUBSIZE1="2048",SUBSIZE2="2048")
    >>> subarray_defined(header)
    True
    >>> header = dict(SUBARRAY="GENERIC",SUBSTRT1="1",SUBSTRT2="1",SUBISIZE2="2048")
    >>> subarray_defined(header)
    False
    """
    required = ("SUBARRAY", "SUBSTRT1", "SUBSTRT2", "SUBSIZE1", "SUBSIZE2")
    return all(
        header.get(keyword, "UNDEFINED") != "UNDEFINED" for keyword in required
    )
def data_value(value: str) -> float:
    """Convert to a float; some trigger values are strings, rather than
    numbers (ex. indicating the letter); convert these to 1.0.
    Falsy input (empty string) yields 0.0."""
    if not value:
        # empty string
        return 0.0
    try:
        return float(value)
    except ValueError:
        # Non-numeric marker string (e.g. a letter).
        return 1.0
def calculate_points(record):
    """Calculate league points per provided team record.
    Points are earned as follows: win 3 points, draw 1 point,
    loss 0 points.
    Parameters:
        record (list): list of wins, draws, and losses
    Returns
        int: total league points earned
    """
    wins, draws = record[0], record[1]
    return 3 * wins + draws
def _get_param_vals(param):
"""Gets the value of the parameters
:param param: The parameter triple whose values are to be generated.
:returns: The value of the parameter on a five-point stencil.
"""
return [
param[1] + i * param[2]
for i in range(-2, 3)
] |
def chunk_str(content, length=420):
    """
    Chunks a string into smaller strings of at most the given length,
    preferring to break at the last space within the limit. Returns chunks.
    :rtype list
    """
    def chunk(c, l):
        # Yield successive chunks until the remaining text is exhausted.
        while c:
            # Append a sentinel space, take the first l chars, and drop the
            # trailing partial word; if the slice contains no space at all,
            # rsplit returns it unchanged (a hard cut at l chars).
            out = (c+' ')[:l].rsplit(' ', 1)[0]
            c = c[len(out):].strip()
            yield out
    return list(chunk(content, length))
def is_in_file(file_path, text):
    """
    Looks for text appearing in a file.
    :param str file_path: Path to the source file
    :param str text: Text to find in the file
    :raises OSError: If the file does not exist
    :returns bool: True if text is in file, False otherwise
    """
    with open(file_path, 'r') as source:
        return text in source.read()
def split_metrics_by_namespace_and_name(metrics, namespace, name):
    """Splits metrics list by namespace and name.
    Args:
      metrics: list of metrics from pipeline result
      namespace(str): filter metrics by namespace
      name(str): filter metrics by name
    Returns:
      two lists - one of metrics which are matching filters
      and second of not matching
    """
    matching = []
    not_matching = []
    for metric in metrics:
        key_metric = metric.key.metric
        matches = (key_metric.namespace == namespace
                   and key_metric.name == name)
        (matching if matches else not_matching).append(metric)
    return matching, not_matching
def lowercase(previous_result):
    """ Return ``previous_result`` converted to all lower case. """
    lowered = previous_result.lower()
    return lowered
def carbonblack_binaryinfo_host_observed_alternate(rec):
    """CarbonBlack BinaryInfo Host Observed Watchlist Match.

    True when the record's hostname is 'FS-HQ' and its md5 matches the
    watchlisted binary hash.
    """
    # Guard clause keeps the original short-circuit: rec['md5'] is only
    # accessed when the hostname matches.
    if rec['hostname'] != 'FS-HQ':
        return False
    return rec['md5'] == '9E4B0E7472B4CEBA9E17F440B8CB0CCC'
def reverse_dict(ori_dict):
    """Reverse dictionary mapping.
    Parameters
    ----------
    ori_dict : Dictionary
        original given dictionary
    Returns
    -------
    res_dict : Dictionary
        The result dictionary with the mapping of one-to-many: each value
        maps to the list of keys that held it, in insertion order.
    """
    reversed_mapping = {}
    for key, value in ori_dict.items():
        reversed_mapping.setdefault(value, []).append(key)
    return reversed_mapping
def recognize_destination(line: str) -> bool:
    """ Recognizes a .po file target ("msgstr") line. """
    return line.startswith("msgstr")
def hash_to_hex_str(x: bytes) -> str:
    """Convert a big-endian binary hash to displayed hex string.
    Display form of a binary hash is byte-reversed and converted to hex.
    """
    reversed_hash = x[::-1]
    return reversed_hash.hex()
def max_speed(geo_data):
    """Return the max. speed in km/h as float, rounded to one decimal.

    geo_data["differential_speed"] is in m/s; multiply by 3.6 for km/h.
    """
    peak_mps = max(geo_data["differential_speed"])
    return round(peak_mps * 3.6, 1)
def compare_equality(a, b):
    """Returns True if two arguments are equal.
    Both arguments need to have the same dimensionality; incompatible
    dimensionalities yield False instead of raising. Same-length sequences
    are compared element-wise, recursively.
    Parameters
    ----------
    a : quantity
    b : quantity
    Examples
    --------
    >>> km, m = default_units.kilometre, default_units.metre
    >>> compare_equality(3*km, 3)
    False
    >>> compare_equality(3*km, 3000*m)
    True
    """
    # Work around for https://github.com/python-quantities/python-quantities/issues/146
    # Probe addition: quantities raise ValueError on unit mismatch, while
    # non-numeric values (e.g. None) raise TypeError.
    try:
        a + b
    except TypeError:
        # We might be dealing with e.g. None (None + None raises TypeError)
        try:
            len(a)
        except TypeError:
            # Assumed scalar
            return a == b
        else:
            if len(a) != len(b):
                return False
            # Same-length sequences: compare element-wise, recursively.
            return all(compare_equality(_a, _b) for _a, _b in zip(a, b))
    except ValueError:
        # Incompatible dimensionality -> not equal.
        return False
    else:
        return a == b
def parental_numbering(aseq1, aseq2):
    """ given two ALIGNED sequences, return a 'position list' for the second
    sequence based on the parental sequence.

    Positions follow the parent's (aseq1's) numbering; residues inserted
    relative to the parent (parent gap '-') are labelled with the preceding
    parent position plus a letter suffix ('a', 'b', ...); parent positions
    deleted in the child (child gap '-') are skipped. """
    idx = 1
    numlist = []
    insertchars = 'abcdefghijklmnopqrstuvwxyz'
    insertidx = 0
    for s1, s2 in zip(aseq1, aseq2):
        if s2 == '-':
            # Deletion in the child: advance parent numbering, emit nothing.
            idx += 1
            continue
        if s1 == '-':
            # Insertion relative to the parent: previous position + letter;
            # letter suffixes wrap around after 'z'.
            numlist.append(str(idx - 1) + insertchars[insertidx % len(insertchars)])
            insertidx += 1
            continue
        # Aligned residue: reset the insertion counter and number normally.
        insertidx = 0
        numlist.append(str(idx))
        idx += 1
    return numlist
def guess_archive_type(pathname):
    """
    Guess the file type of a Python source distribution archive.
    Checks for known "magic" file headers to identify the type of the archive.
    Previously this used the ``file`` executable, now it checks the magic file
    headers itself. I could have used any of the numerous ``libmagic`` bindings
    on PyPI, but that would add a binary dependency to ``pip-accel`` and I
    don't want that :-).
    :param pathname: The pathname of an existing archive (a string).
    :returns: One of the strings ``gzip``, ``bzip2`` or ``zip`` or the value
              ``None`` when the filename extension cannot be guessed based on
              the file header.
    """
    # Known two-byte magic headers:
    #  - gzip: 0x1F, 0x8B
    #  - bzip2: "BZ"
    #  - zip: "PK" (after Phil Katz; per Wikipedia, ZIP archives have no
    #    official magic number, but "PK" is what you'll usually find)
    magic_numbers = ((b'\x1f\x8b', 'gzip'), (b'BZ', 'bzip2'), (b'PK', 'zip'))
    with open(pathname, 'rb') as handle:
        header = handle.read(2)
    for magic, archive_type in magic_numbers:
        if header.startswith(magic):
            return archive_type
def json_to_xml(json_obj, line_padding=" "):
"""Function which converts json to xml format"""
result_list = list()
json_obj_type = type(json_obj)
if json_obj_type is list:
for sub_elem in json_obj:
result_list.append(json_to_xml(sub_elem, line_padding))
return "\n".join(result_list)
if json_obj_type is dict:
for tag_name in json_obj:
sub_obj = json_obj[tag_name]
result_list.append("%s<%s>" % (line_padding, tag_name))
result_list.append(json_to_xml(sub_obj, "\t" + line_padding))
result_list.append("%s</%s>" % (line_padding, tag_name))
return "\n".join(result_list)
return "%s%s" % (line_padding, json_obj) |
def generate_day_dict(days, tasks):
    """
    Creates a dictionary of days as keys and values as dictionaries of tasks
    with values as empty lists.

    :param days: days to use as keys
    :param tasks: tasks to use in the inner dicts
    :return: dictionary

    Bug fix: the previous implementation built one task dict and gave every
    day a shallow `.copy()` of it, so all days shared the *same* list
    objects -- appending to one day's task list leaked into every other day.
    Building a fresh inner dict (and fresh lists) per day isolates them.
    """
    return {day: {task: [] for task in tasks} for day in days}
def a_at_eccT(ecc_T, ecc_F, p):
    """
    Computes the semi-major axis of the transfer orbit.
    Parameters
    ----------
    ecc_T: float
        Eccentricity transverse component.
    ecc_F: float
        Eccentricity of the fundamental ellipse.
    p: float
        Transfer orbit parameter or semi-latus rectum.
    Returns
    -------
    a: float
        Semi-major axis of the transfer orbit: a = p / (1 - e_F^2 - e_T^2).
    """
    denominator = 1 - ecc_F ** 2 - ecc_T ** 2
    return p / denominator
def build_path(segment, running_on_floydhub=False):
    """
    Builds the full path to `segment`, depending on where we are running our code.
    Args
    :segment File or directory we want to build the full path to.
    :running_on_floydhub When True, prefix with FloydHub's input mount.
    """
    prefix = '/floyd/input/data' if running_on_floydhub else 'data'
    return '{}/{}'.format(prefix, segment)
def description(descfile):
    """Get the contents of the DESCRIPTION file of a font project.

    Returns None when `descfile` is falsy (no file given).
    """
    if not descfile:
        return
    import io
    # Use a context manager so the file handle is closed promptly instead
    # of relying on garbage collection (the old code leaked the handle).
    with io.open(descfile, "r", encoding="utf-8") as handle:
        return handle.read()
def ppm_from_temp(temperature):
    """
    Calculates chemical shift from given temperature
    >>> from watertemp import ppm_from_temp
    >>> ppm_from_temp(32.0)
    4.7
    >>> ppm_from_temp(-30)
    5.388888888888889
    >>>
    """
    shift = 455 - temperature
    return shift / 90
def clean_dollars(x):
    """
    Used to clean up dollar fields of '$', ',' and the stray token 'city'.

    Non-string values (e.g. floats already parsed as numbers) are returned
    unchanged. Bug fix: the previous implementation implicitly returned
    None for float input (silently destroying numeric values) and raised
    AttributeError for any other non-string type.
    """
    if isinstance(x, str):
        return x.replace('$','').replace(',','').replace('city','')
    return x
def colorize(text, color):
    """
    Wrap `text` with ANSI `color` code. See
    https://stackoverflow.com/questions/4842424/list-of-ansi-color-escape-sequences
    """
    # \033[<code>m starts the color; \033[0m restores the default.
    return f"\033[{color}m{text}\033[0m"
def _good_output(data, x_data=None):
"""A model function that correctly gives a numpy array and dictionary output."""
return data, {'param': 1, 'param2': 2} |
def get_types(data):
    """
    Return one tuple with the type of every element in `data`.

    The previous version rebuilt the whole tuple on every iteration
    (quadratic in len(data)); a single generator pass is linear.
    """
    return tuple(type(d) for d in data)
def max_recursion(tup):
    """
    Returns the largest element in the tuple, recursively.
    >>> max_recursion((1,2,3,4))
    4
    >>> max_recursion((13,2,3,4))
    13
    >>> max_recursion((13,2,33,4))
    33

    Bug fix (performance): the previous version called itself twice per
    level (once in the comparison, once in the result), giving O(2^n)
    calls; evaluating the recursive maximum once makes it linear.
    """
    if len(tup) == 1:
        return tup[0]
    rest_max = max_recursion(tup[1:])
    return tup[0] if tup[0] > rest_max else rest_max
def linearize_subtable(subtable, table_page_title, table_section_title):
    """Linearize the highlighted subtable and return a string of its contents.

    Output: optional <page_title>/<section_title> wrappers, then a <table>
    of <cell> items, each cell value followed by its <col_header> and
    <row_header> values.
    """
    pieces = []
    if table_page_title:
        pieces.append("<page_title> " + table_page_title + " </page_title> ")
    if table_section_title:
        pieces.append("<section_title> " + table_section_title + " </section_title> ")
    pieces.append("<table> ")
    for item in subtable:
        # The value of the cell.
        pieces.append("<cell> " + item["cell"]["value"] + " ")
        # All the column headers associated with this cell.
        for header in item["col_headers"]:
            pieces.append("<col_header> " + header["value"] + " </col_header> ")
        # All the row headers associated with this cell.
        for header in item["row_headers"]:
            pieces.append("<row_header> " + header["value"] + " </row_header> ")
        pieces.append("</cell> ")
    pieces.append("</table>")
    return "".join(pieces)
def distance_to_first_repeated_block(directions):
    """Returns the block distance to the first block visited twice.

    Walks a taxicab grid from the origin following turn-and-distance
    instructions (e.g. 'R8'), recording every block stepped through; the
    first block seen a second time ends the walk.

    Args:
        directions (list): A list of strings indication turn direction and
            length of distance.
    Returns:
        (int): Distance, in blocks, from origin to the first block visited
            twice, or -1 if no block is ever revisited.
    """
    index = 0
    # Parity values track the current heading sign along each axis.
    ns_parity = 1
    ew_parity = 1
    direction_parity_table = {'R': 1, 'L': -1}
    travel_location = [0, 0]
    # Visited blocks are stored as the str() of the coordinate list.
    locations_visited = set()
    locations_visited.add(str(travel_location))
    for direction in directions:
        direction_parity = direction_parity_table[direction[0]]
        _, distance = direction[0], int(direction[1:])
        if index % 2 == 0: #heading ew
            if ns_parity == 1: #facing north
                ew_parity = direction_parity
            else: #facing south
                ew_parity = -direction_parity
            old_ew = travel_location[1]
            travel_location[1] += ew_parity * distance
            # Step through every intermediate block so crossings are caught.
            for num in range(old_ew + ew_parity, travel_location[1] + ew_parity,
                             ew_parity):
                temp_location = [travel_location[0], num]
                if str(temp_location) in locations_visited:
                    # Manhattan distance of the first repeated block.
                    return sum([abs(num) for num in temp_location])
                locations_visited.add(str(temp_location))
        else: #heading ns
            if ew_parity == 1: #facing east
                ns_parity = -direction_parity
            else: #facing west
                ns_parity = direction_parity
            old_ns = travel_location[0]
            travel_location[0] += ns_parity * distance
            for num in range(old_ns + ns_parity, travel_location[0] + ns_parity,
                             ns_parity):
                temp_location = [num, travel_location[1]]
                if str(temp_location) in locations_visited:
                    return sum([abs(num) for num in temp_location])
                locations_visited.add(str(temp_location))
        index += 1
    return -1
def exchange_rate_with_fee(exchange_rate: float, spread: int) -> float:
    """Calculate the exchange rate including the spread fee.
    Args:
        exchange_rate (float): the unit value of the foreign currency.
        spread (int): percentage that is taken as an exchange fee.
    Returns:
        float: exchange rate plus exchange fee.
    """
    fee_multiplier = 1 + spread / 100
    return exchange_rate * fee_multiplier
def get_padding(kernel_size, dilation=1):
    """Get the 'same' padding size for a dilated convolution kernel."""
    effective_span = kernel_size * dilation - dilation
    return int(effective_span / 2)
def _pf1b(val1, val2):
"""
Returns
-------
int
Description for the return statement
"""
return int(val1 + int(val2[0])) |
def charge(ph, pkalist, chargelist):
    """Jacob Tolborg's charge model where the charge is assigned from partial
    charges from all pKa values at the pH point.

    Basic sites (charge == 1) get partial charge 1 / (1 + 10**(pH - pKa));
    all other sites are treated as acidic: -1 / (1 + 10**-(pH - pKa)).
    """
    total = 0
    for site_charge, pka in zip(chargelist, pkalist):
        if site_charge == 1:
            total += 1 / (1 + 10 ** (ph - pka))
        else:
            total += -1 / (1 + 10 ** -(ph - pka))
    return total
def _get_result_from_dynamo_query(response: dict) -> dict:
"""
Dynamo returns list of db items in the table. Query returns only 1 items to take the first item from the list.
"""
item_retrieved_from_db = response["Items"]
item_retrieved_from_db = item_retrieved_from_db[0]
return item_retrieved_from_db |
def kvadrat_n(n_vrstic):
    """ Return a string that draws a square with n_vrstic rows.

    Exercise stub: the solution body is intentionally left empty and the
    function currently returns an empty string. """
    # BEGIN SOLUTION
    result = ''
    # END SOLUTION
    return result
def encode_varint(value, write):
    """ Encode an integer to a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.
        Arguments:
            value (int): Signed value to encode (zig-zag encoded first,
                assuming a 64-bit input).
            write (function): Called per byte that needs to be writen
        Returns:
            int: Number of bytes written

    Bug fix: the general (>5 byte) path previously returned the loop
    counter, which is one less than the number of bytes actually written.
    """
    # Zig-zag encode so small negative numbers stay small.
    value = (value << 1) ^ (value >> 63)

    if value <= 0x7f:  # 1 byte
        write(value)
        return 1
    if value <= 0x3fff:  # 2 bytes
        write(0x80 | (value & 0x7f))
        write(value >> 7)
        return 2
    if value <= 0x1fffff:  # 3 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(value >> 14)
        return 3
    if value <= 0xfffffff:  # 4 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(value >> 21)
        return 4
    if value <= 0x7ffffffff:  # 5 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(0x80 | ((value >> 21) & 0x7f))
        write(value >> 28)
        return 5
    # General algorithm for longer values: emit 7 bits per byte, MSB set on
    # every byte except the last.
    count = 0
    bits = value & 0x7f
    value >>= 7
    while value:
        write(0x80 | bits)
        bits = value & 0x7f
        value >>= 7
        count += 1
    write(bits)
    # `count` loop iterations each wrote one byte, plus the final byte.
    return count + 1
def CommaSeparatedList(value_list, is_quoted=False):
    """Concatenates a list of strings.
    This turns ['a', 'b', 'c'] into a single string 'a, b and c'. It optionally
    adds quotes (`a') around each element. Used for logging.
    """
    items = (["`" + v + "'" for v in value_list] if is_quoted
             else list(value_list))
    if not items:
        return ''
    if len(items) == 1:
        return items[0]
    return ', '.join(items[:-1]) + ' and ' + items[-1]
def array_to_string(row_list: list) -> str:
    """Stringify a list as "[e1,e2,...]" with no spaces."""
    inner = ",".join(map(str, row_list))
    return "[" + inner + "]"
def overlap(a, b):
    """
    Checks to see if two series intersect, or have identical start/end
    positions. Each series is a (start, end) pair; any None endpoint means
    no overlap.
    """
    start_a, end_a = a[0], a[1]
    start_b, end_b = b[0], b[1]
    # if any start / end is None then it doesn't overlap
    if start_a is None or end_a is None or start_b is None or end_b is None:
        return False
    # Strict intersection: either endpoint of `a` falls inside `b`.
    if start_b < start_a < end_b or start_b < end_a < end_b:
        return True
    # If the series start or end in the same place
    return start_a == start_b or end_a == end_b
def dedup(s):
    """
    Order-preserving deduplication of a list.
    Reference: https://www.peterbe.com/plog/uniqifiers-benchmark
    """
    # dict preserves insertion order (Python 3.7+), so the keys are the
    # first occurrences of each element, in order.
    return list(dict.fromkeys(s))
def roi(cost, dis):
    """calculates the gain from an investment, rounded to 4 decimals
    parameters:
    -----------
    cost: cost of the investment
    dis: disposal value of the investment
    """
    return round((dis - cost) / cost, 4)
def mean(curr_avg: float, n: int, new_value: float) -> float:
    """
    Updates an average value using the real time streaming algorithm which
    uses constant space for calculations and doesn't overflow a sum counter.
    Gives the exact value not an approximation.
    NB: Value of curr_avg doesn't matter for first call as long as n is set
    to 0 for first call
    :param curr_avg: The current average value
    :param n: The number of values that have been summed so far
    :param new_value: The value to be added to the average
    :return: The newly updated average value
    """
    running_total = curr_avg * n + new_value
    return running_total / (n + 1)
def handle_effects(state_combat):
    """
    Apply active spell effects at the start of the player's turn.

    Mutates and returns `state_combat`: shield grants 7 armor while its
    timer runs (0 otherwise), poison deals 3 damage to the boss, and
    recharge restores 101 mana. Each active timer ticks down by one.
    """
    if state_combat['shield_timer'] > 0:
        state_combat['shield_timer'] -= 1
        state_combat['player_armor'] = 7
    else:
        state_combat['player_armor'] = 0
    if state_combat['poison_timer'] > 0:
        state_combat['poison_timer'] -= 1
        state_combat['boss_hp'] -= 3
    if state_combat['recharge_timer'] > 0:
        state_combat['recharge_timer'] -= 1
        state_combat['player_mana'] += 101
    return state_combat
def acronym_gen(phrase):
    """Acronym generator
    Args:
        phrase (str): Phrase to shorten to acronym
    Returns:
        str: Acronym (upper-cased first letter of each word)
    """
    return "".join(word[0].upper() for word in phrase.split())
def to_struct(cond, brackets='[]'):
    """Recursively scanning through a tokenized expression and building the
    condition function step by step: tokens between matching bracket pairs
    are grouped into nested sub-lists.

    NOTE(review): `cond` is consumed (mutated) via pop(0); pass a copy if
    the caller still needs the token list. Unbalanced brackets raise
    IndexError when `cond` runs out of tokens.
    """
    openbrkt, closebrkt = brackets
    # NOTE(review): expr1 appears unused.
    expr1 = None
    res = []
    while cond:
        part = cond.pop(0)
        if part == openbrkt:
            # Collect tokens until the matching close bracket at this level.
            lev = 1
            inner = []
            while not (lev == 1 and cond[0] == closebrkt):
                inner.append(cond.pop(0))
                if inner[-1] == openbrkt:
                    lev += 1
                elif inner[-1] == closebrkt:
                    lev -= 1
            # Discard the matching close bracket itself.
            cond.pop(0)
            res.append(to_struct(inner, brackets))
        else:
            res.append(part)
    return res
def _dict_flatten(data):
"""Return flattened dict of input dict <data>.
After https://codereview.stackexchange.com/revisions/21035/3
Parameters
----------
data : dict
Input dict to flatten
Returns
-------
fdata : dict
Flattened dict.
"""
def expand(key, value):
"""Expand list."""
if isinstance(value, dict):
return [(key+'>'+k, v) for k, v in _dict_flatten(value).items()]
else:
return [(key, value)]
return dict([item for k, v in data.items() for item in expand(k, v)]) |
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2]) |
def get_id(filename, prefix, ext):
    """
    Given a path to a file in the form of <path>/<prefix><id>.<ext>
    returns <id>
    Parameters
    ----------
    filename : str
        full path or basepath to `prefix` prefixed file name
        with extension `ext`
    prefix : str
        prefix of file name
    ext : str
        extension of file (without the leading dot)
    Raises
    ------
    ValueError
        if `prefix` or `ext` does not occur in `filename`.
    Example
    -------
    filename = './DeepDriveMD/data/val-loss-0-timestamp-98.npy'
    id_ = get_id(filename, prefix='val-loss-', ext='npy')
    print(id_) -> 0-timestamp-98
    """
    # Report the offending filename in the error message (the old messages
    # printed a hard-coded placeholder instead of the actual path).
    if prefix not in filename:
        raise ValueError(f'prefix: {prefix} not in filename: {filename}')
    if ext not in filename:
        raise ValueError(f'ext: {ext} not in filename: {filename}')
    # Text between the prefix and the extension, minus the trailing dot.
    return filename.split(prefix)[1].split(ext)[0][:-1]
def get_bbox(row):
    """
    Extract the bounding box (columns 2 through 5) from an annotation row.
    """
    return tuple(row[2:6])
def remove_quotes(s):
    """Removes start/end quotes from a string, if needed.
    If s is not a string (or is shorter than 2 chars), it is returned
    untouched.
    """
    if not isinstance(s, str) or len(s) < 2:
        return s
    # Strip only when the first and last characters are the same quote kind.
    if (s[0], s[-1]) in (('"', '"'), ("'", "'")):
        return s[1:-1]
    return s
def recursiveSum(arr: list):
    """
    Recursively sum a list of numbers.
    input : List
    output : Return Recursive Sum
    >>> print(recursiveSum([6,4,2]))
    """
    if not arr:
        return 0
    head, *tail = arr
    return head + recursiveSum(tail)
def check_if_param_valid(params: dict) -> bool:
    """
    Check if the parameters are valid.

    Rules enforced:
      * "filter_speech_first" must be a bool;
      * "pad_onset" / "pad_offset" are exempt from the positivity check;
      * every other entry is assumed iterable and all its values must be
        >= 0 (NOTE(review): the error message says "larger than 0" but the
        check is `>= 0` -- confirm which is intended);
      * all "onset"/"offset" values must additionally be <= 1.

    Raises ValueError on the first violation; returns True when valid.
    """
    for i in params:
        if i == "filter_speech_first":
            if not type(params["filter_speech_first"]) == bool:
                raise ValueError("Invalid inputs! filter_speech_first should be either True or False!")
        elif i == "pad_onset":
            continue
        elif i == "pad_offset":
            continue
        else:
            for j in params[i]:
                if not j >= 0:
                    raise ValueError(
                        "Invalid inputs! All float parameters except pad_onset and pad_offset should be larger than 0!"
                    )
    if not (all(i <= 1 for i in params['onset']) and all(i <= 1 for i in params['offset'])):
        raise ValueError("Invalid inputs! The onset and offset thresholds should be in range [0, 1]!")
    return True
def fix_cf_supplier_ids(json):
    """Modify Contracts Finder OCDS JSON enough to be processed.

    Pins the OCDS version to '1.0' (for ocdskit) and assigns each
    supplier of the first award a distinct string id — the source data
    sets every supplier id to zero.
    """
    json['version'] = '1.0'
    suppliers = json['releases'][0]['awards'][0]['suppliers']
    for idx, supplier in enumerate(suppliers):
        supplier['id'] = str(idx)
    return json
def frame_to_time(frame, fps):
    """Convert a frame number to a time (frame / fps)."""
    return frame / fps
def pow_op_to_callable(expr: str) -> str:
    """
    Replace a string expression of the form 'a**b' or 'a^b'
    with 'pow(a, b)'. Expressions of the form 'a**(b**c**...)'
    become 'pow(a, b**c**...)' — only the first operator is rewritten.
    A necessary precondition is that the expression contains
    '^' or '**'.
    Parameters:
        expr: string expression of the form 'a**b'
    """
    caret = expr.find('^')
    stars = expr.find('**')
    # Split on whichever power operator occurs first; when only one is
    # present, split on that one.
    if caret < 0 or (stars >= 0 and stars < caret):
        base, exponent = expr.split('**', 1)
    else:
        base, exponent = expr.split('^', 1)
    return 'pow(' + base + ',' + exponent + ')'
def _nibbles_to_bits(line):
"""Convert from icebox hex string for ramdata in asc files to an array of Bool"""
res = []
for ch in line:
res += [xx == "1" for xx in "{:4b}".format(int(ch, 16))]
res.reverse()
return res |
def descuento_tres(i, j, nivel):
    """Return True if board cell (i, j) deducts three points for `nivel`.

    `nivel` is the difficulty level: "facil", "medio" or "dificil".
    Non-matching cells and unknown levels return False. (The original
    implicitly returned None for non-matching cells on known levels and
    False only for unknown levels; both are falsy, so callers relying on
    truthiness are unaffected.)
    """
    # Cells that deduct three points, per difficulty level.
    triple_cells = {
        "facil": {(6, 6), (6, 8), (8, 6), (8, 8)},
        "medio": {(0, 7), (14, 7), (7, 0), (7, 14)},
        "dificil": {(10, 1), (4, 1), (1, 4), (13, 4),
                    (1, 10), (4, 13), (13, 10), (10, 13)},
    }
    return (i, j) in triple_cells.get(nivel, set())
def link_song(track_id):
    """Build the public Spotify URL for a track.

    Args:
        - track_id: id of the song
    Returns:
        a link to that song on Spotify
    """
    base_url = "https://open.spotify.com/track/"
    # String concatenation (not an f-string) so a non-str id still raises.
    return base_url + track_id
def compare_elements(a, b):
    """Return True when a and b contain the same elements.

    Order is ignored (and, because the comparison is set-based,
    duplicate counts are ignored too). None is treated as empty.
    """
    left = set() if a is None else set(a)
    right = set() if b is None else set(b)
    return left == right
def compute_error_for_line_given_points(b, m, points):
    """Mean squared error of the line y = m*x + b over *points*.

    For every (x, y) pair, accumulate the squared vertical distance
    between y and the line's prediction m*x + b, then average over the
    number of points. Lines that fit the data better yield lower values.
    Raises ZeroDivisionError for an empty point set, as before.
    """
    squared_residuals = ((y - (m * x + b)) ** 2 for x, y in points)
    return sum(squared_residuals) / float(len(points))
def _path_to_str(path):
"""Converts a path (tuple of strings) to a printable string."""
return ":".join(str(field) for field in path) |
def ascii_encode(string: str):
    """
    Encode the string as ASCII, silently dropping non-ASCII characters.

    The previous implementation sliced the *repr* of the bytes object
    (``str(b)[2:-1]``), which mangled backslashes, quotes and control
    characters (e.g. a real newline came back as the two characters
    ``\\n``). Decoding the ASCII bytes returns the actual text.

    Args:
        string: The string
    Returns:
        The string with all non-ASCII characters removed
    """
    return string.encode('ascii', 'ignore').decode('ascii')
def distance_to_buffer(distance):
    """
    Convert a great-circle distance (valid for the small range < 1000 km)
    to the Euclidean point-buffer size used by shapely.

    :param distance: great circle distance in kilometers
    :return: point shift in Euclidean coordinates.
    """
    # Empirical conversion factor (km per Euclidean buffer unit);
    # the source of this "magic number" is not documented here.
    km_per_unit = 1078.599717114
    return distance / km_per_unit
def get_text_window(offset, matchlen, textsize, width):
    """Compute context-window bounds around a match in a text.

    Returns [left_x, left_y, right_x, right_y], where [left_x, left_y]
    spans the pre-match context and [right_x, right_y] the post-match
    context, each up to `width` characters wide. All bounds are clamped
    into [0, textsize - 1]; clamping can shorten or empty either window.
    """
    # Pre-match window: cannot start before the text begins, and its end
    # never precedes its (clamped) start.
    left_x = max(offset - width, 0)
    left_y = max(offset - 1, left_x)
    # Post-match window: cannot run past the last character, and its
    # start never exceeds its (clamped) end.
    right_y = min(offset + matchlen + width, textsize - 1)
    right_x = min(offset + matchlen, right_y)
    return [left_x, left_y, right_x, right_y]
def remove_nested_parens(s):
    """
    Return a copy of *s* with all parenthesized text removed.

    Nesting is handled by tracking the current depth. An unmatched ')'
    encountered at depth zero is kept in the output, matching the
    original behavior.
    """
    kept = []
    depth = 0
    for ch in s:
        if ch == "(":
            depth += 1
        elif ch == ")" and depth:
            depth -= 1
        elif depth == 0:
            kept.append(ch)
    return "".join(kept)
def get_broker_partition_counts(brokers):
    """Return a list with the number of partitions on each broker,
    in the same order as *brokers*."""
    counts = []
    for broker in brokers:
        counts.append(len(broker.partitions))
    return counts
def get_info_on_inputs(named_inputs, n_unnamed_inputs):
    """
    Return a human-readable description of named and un-named inputs.
    """
    n_named_inputs = len(named_inputs)

    def plural_suffix(count):
        # "s" when more than one, "" otherwise.
        return "s" if count > 1 else ""

    if n_named_inputs == 0 and n_unnamed_inputs == 0:
        return "The function is supposed to have no input."
    if n_named_inputs == 0:
        if n_unnamed_inputs == 1:
            return (
                "The function has a single input variable which has no "
                "name, and thus cannot be assigned through a keyword"
                " argument (use 'name=...' in a Variable's "
                "constructor to give it a name)."
            )
        return (
            f"The function has {n_unnamed_inputs} inputs, but none of them is named,"
            " and thus they cannot be assigned through keyword "
            "arguments (use 'name=...' in a Variable's "
            "constructor to give it a name)."
        )
    if n_unnamed_inputs == 0:
        return (
            f"The function has {n_named_inputs} named "
            f"input{plural_suffix(n_named_inputs)} ({', '.join(named_inputs)})."
        )
    return (
        f"The function has {n_named_inputs} named input{plural_suffix(n_named_inputs)} "
        f"({', '.join(named_inputs)}), and {n_unnamed_inputs} unnamed "
        f"input{plural_suffix(n_unnamed_inputs)} which thus cannot be accessed through keyword "
        f"argument{plural_suffix(n_unnamed_inputs)} (use 'name=...' in a variable's constructor "
        "to give it a name)."
    )
def wrap_query(query, props):
    """
    Surround the core Elasticsearch aggregation <query> with one Terms
    aggregation for every field in <props>.
    Fields at the beginning of the list end up as outer aggregations,
    while later fields are nested more deeply; the core query is nested
    deepest.
    :param query: A `dict` representing an Elasticsearch aggregation.
    :param props: A list of Elasticsearch text field names.
    :returns: Aggregation containing <query> wrapped in Terms aggregations.
    """
    # Iterative form of the original recursion: wrap with the last field
    # first so earlier fields end up outermost.
    wrapped = query
    for field_name in reversed(props):
        wrapped = {
            'distinct_{}'.format(field_name): {
                'terms': {
                    'size': 10000,
                    'field': 'properties.{}.raw'.format(field_name),
                    'order': {'_key': 'asc'}
                },
                'aggregations': wrapped
            }
        }
    return wrapped
def signext(val, mask):
    """Sign-extend the portion of *val* covered by *mask*.

    *mask* is required to be all zeros followed by all ones; the top bit
    of the masked region is interpreted as a two's-complement sign bit,
    so a set sign bit extends ones through all higher bits.
    """
    val &= mask
    sign_bit = mask ^ (mask >> 1)  # highest bit covered by the mask
    if val & sign_bit:
        val |= ~mask  # negative: propagate ones above the mask
    return val
def index_to_world(index, origin, spacing):
    """Translate an index coordinate to world coordinates
    as origin + index * spacing."""
    scaled = index * spacing
    return origin + scaled
def _line_to_list(line):
"""Converts a pla line to a list of ints.
Raises a ValueError if it encounters an unexpected character."""
l = []
for c in line:
if c == '0':
l.append(0)
elif c == '1':
l.append(1)
elif c == '-' or c == '~':
l.append(2)
else:
raise ValueError
return l |
def _sanitize_name(name: str):
"""convert into a valid name for AI Platform Pipelines"""
# python IDs will allow underscores but disallow hyphens, so some
# interpolation is necessary
return name.replace("_", "-") |
def get_exclamation_count(text):
    """
    Count the exclamation points in the input text.

    Args:
        text: string containing text to count the exclamation points in.
    Returns:
        The number of exclamation points in the input text
    """
    return sum(1 for ch in text if ch == "!")
def prepend_slash(path):
    """Return *path*, prefixing a '/' when it does not already start with one."""
    if path.startswith('/'):
        return path
    return '/' + path
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.