content stringlengths 42 6.51k |
|---|
def execute(instructions):
    """Run the hand-held console program.

    Returns (accumulator, reason): reason is "cycle" when an instruction is
    about to execute a second time, "termination" when the program counter
    lands one past the last instruction.
    """
    pc, acc = 0, 0
    seen = set()
    while True:
        if pc in seen:
            return (acc, "cycle")
        if pc == len(instructions):
            return (acc, "termination")
        seen.add(pc)
        op = instructions[pc]["operation"]
        if op == "nop":
            pc += 1
        elif op == "acc":
            acc += instructions[pc]["argument"]
            pc += 1
        elif op == "jmp":
            pc += instructions[pc]["argument"]
        # unknown op: pc stays put and is reported as a "cycle" next pass
def is_dict(d):
    """Return True when *d* is a dict (or an instance of a dict subclass)."""
    return isinstance(d, dict)
def merge_dict(*dicts):
    """Merge dictionaries left to right; later ones win on key clashes.
    >>> merge_dict({1: 'one'}, {2: 'two'})
    {1: 'one', 2: 'two'}
    >>> merge_dict({1: 2, 3: 4}, {3: 3, 4: 4})
    {1: 2, 3: 3, 4: 4}
    """
    return {key: value for mapping in dicts for key, value in mapping.items()}
def C(n, k):
    """Binomial coefficient C(n, k) = n! / ((n - k)! k!).

    Uses the multiplicative formula with the smaller of k and n-k;
    out-of-range k yields 1 (empty product), matching the original.
    """
    k = min(k, n - k)
    result = 1
    for step in range(k):
        result = result * (n - step)
        result = result // (step + 1)
    return result
def sorter(data):
    """Return the dictionary's (key, value) pairs sorted by key, ascending.

    :param data: dict
    :return: list of (key, value) tuples
    """
    # dict keys are unique, so sorting the pairs never compares values
    return sorted(data.items())
def merge_transition(trans, nxt):
    """Pick the transition for the merge operation of two columns.

    :param trans: candidate transition; used when it is a string
    :param nxt: fallback returned for any non-string *trans*
    :return: *trans* if it is a string, else *nxt*
    """
    # isinstance is the idiomatic check (also accepts str subclasses)
    return trans if isinstance(trans, str) else nxt
def convert_allele_origins(orig_allele_origins):
    """Split ClinVar allele origins into up to two groups: ['somatic'] when
    present, and a sorted group of all remaining (germline-like) origins."""
    remaining = {origin.lower() for origin in orig_allele_origins}
    groups = []
    if 'somatic' in remaining:
        groups.append(['somatic'])
        remaining.discard('somatic')
    # anything left over forms the second, non-somatic group
    if remaining:
        groups.append(sorted(remaining))
    return groups
def addstr(arg1, arg2):
    """Join the string forms of *arg1* and *arg2* with a single space."""
    return ' '.join((str(arg1), str(arg2)))
def add_postfix(quoted_string, postfix):
    """Append *postfix* to a (possibly double-quoted) table name.

    When the trimmed input is wrapped in double quotes, the postfix is
    placed inside the closing quote.  *postfix* is assumed to contain
    no quotes.

    @param quoted_string: str. A database identifier, possibly quoted.
    @param postfix: str. Suffix to append.
    """
    name = quoted_string.strip()
    is_quoted = name.startswith('"') and name.endswith('"')
    if is_quoted:
        # drop the closing quote, append, re-close
        return name[:-1] + postfix + '"'
    return name + postfix
def _isFunction(v):
"""
A utility function to determine if the specified
value is a function.
"""
if v is None:
return False
if hasattr(v, "__call__"):
return True
return False |
def is_magic(token, token_start, buf):
    """
    Detect if the passed token corresponds to a magic command: starts
    with a percent, and it's at the beginning of the buffer.

    *buf* is unused but kept for interface compatibility.
    Uses startswith() so an empty token returns False instead of
    raising IndexError on token[0].
    """
    return token_start == 0 and token.startswith('%')
def deumlaut(s):
    """
    Replace German umlauts and eszett with their ASCII digraphs
    (ä→ae, ö→oe, ü→ue, Ä→Ae, Ö→Oe, Ü→Ue, ß→ss).

    Uses a single str.translate pass instead of seven chained
    .replace() calls.
    """
    replacements = str.maketrans({
        '\xdf': 'ss',  # ß
        '\xfc': 'ue',  # ü
        '\xdc': 'Ue',  # Ü
        '\xf6': 'oe',  # ö
        '\xd6': 'Oe',  # Ö
        '\xe4': 'ae',  # ä
        '\xc4': 'Ae',  # Ä
    })
    return s.translate(replacements)
def process_record_assistant1(col_start_time, col_end_time, col_summary, record):
    """
    Check if the txhash exists or not in the end_time table
    if yes, send streaming data to query tx details and remove related record in the end_time db
    else, save data in the start_time db

    Returns:
        0  -- matching end_time doc found; summary row inserted
        1  -- no match; record stored in the start_time collection
        -1 -- record had no 'txhash', or a DB/parse error occurred

    NOTE(review): despite the docstring, nothing here removes the matched
    record from the end_time collection -- confirm removal happens elsewhere.
    """
    if('txhash' in record):
        # look up the mined-block info recorded for this transaction
        doc = col_end_time.find_one({"txhash": record['txhash']} )
        try:
            if(doc != None):
                end_time = doc['blocktime']
                start_time = record['seconds']
                # blocktime/blocknumber are stored as hex strings
                end_time = int(end_time, 16)
                # seconds between submission and the tx being mined
                waiting_mined_time = end_time - start_time
                blocknumber = int(doc['blocknumber'], 16)
                row = {"txhash": record['txhash'], "blocknumber": blocknumber, "blocktime": end_time,"waiting_time": 0.0,"actual_cost": 0.0, "gas_price":0.0, "gas": 0.0, "gas_used": 0.0, "waiting_mined_time": waiting_mined_time}
                result = col_summary.insert_one(row)
                print('inserted')
                return 0
            else:
                print("insert into start_time")
                # Insert the item to start_time db, ignore the item if duplicate
                col_start_time.insert(record)
                return 1
        except Exception as e:
            print(e)
            return -1
    return -1
def not_in(a, b):
    """Evaluate ``a not in b`` directly instead of negating a membership test."""
    return a not in b
def nested_shape(array_or_tuple, seen=None):
    """Figures out the shape of tensors possibly embedded in tuples
    i.e
    [0,0] returns (2)
    ([0,0], [0,0]) returns (2,2)
    (([0,0], [0,0]),[0,0]) returns ((2,2),2)

    NOTE(review): the examples describe the intent for tensor leaves;
    for plain Python scalars the leaf result is [] and an item whose id
    was already visited is reported as 0 -- confirm this is intended.
    """
    # `seen` tracks visited object ids to stop self-referential structures
    # from recursing forever
    if seen is None:
        seen = set()
    if hasattr(array_or_tuple, 'size'):
        # pytorch tensors use V.size() to get size of tensor
        return list(array_or_tuple.size())
    elif hasattr(array_or_tuple, 'get_shape'):
        # tensorflow uses V.get_shape() to get size of tensor
        return array_or_tuple.get_shape().as_list()
    elif hasattr(array_or_tuple, 'shape'):
        return array_or_tuple.shape
    # mark this container before descending into its items
    seen.add(id(array_or_tuple))
    try:
        # treat object as iterable; already-seen items map to 0
        return [nested_shape(item, seen) if id(item) not in seen else 0 for item in list(array_or_tuple)]
    except TypeError:
        # object is not actually iterable
        # LB: Maybe we should throw an error?
        return []
def negate_columns(decision_matrix, optimization_type):
    """Negate (in place) every column whose optimization type is 1 (minimize);
    return the mutated matrix."""
    minimize_cols = [j for j, opt in enumerate(optimization_type) if opt == 1]
    for row in decision_matrix:
        for j in minimize_cols:
            # multiply by -1.0 (floats), matching the original contract
            row[j] = row[j] * (-1.0)
    return decision_matrix
def divisibleBy3And5(num):
    """Return True when *num* is divisible by both 3 and 5, else False."""
    return all(num % divisor == 0 for divisor in (3, 5))
def forgiver_reclassifier(original_classifier, p):
    """Mark the classifier stochastic unless p is exactly 0 or 1; return it."""
    if p != 0 and p != 1:
        original_classifier["stochastic"] = True
    return original_classifier
def field_is_void(field):
    """Return the message "Field not available" when *field* is the empty
    string, otherwise False.

    NOTE: the truthy return is a string, preserving the original contract.
    """
    return "Field not available" if field == '' else False
def bisect_first_true(arr):
    """Binary search: index of the first True in a False...True-partitioned
    sequence; len(arr) when nothing is True."""
    low, high = 0, len(arr)
    while low < high:
        middle = (low + high) // 2
        if arr[middle]:
            high = middle
        else:
            low = middle + 1
    return low
def twos(val, bits):
    """
    Interpret *val* as a *bits*-wide two's-complement value.

    :param val: int, or a string ('0x...' hex, otherwise binary digits)
    :param bits: width of the value in bits
    :return: signed integer
    """
    if isinstance(val, str):
        base = 16 if '0x' in val else 2
        val = int(val, base)
    sign_bit = 1 << (bits - 1)
    if val & sign_bit:
        # high bit set -> negative in two's complement
        val -= 1 << bits
    return val
def group_range_instances_json(obj):
    """
    Extract (MinSize, MaxSize) of the first AutoScaling group.
    >>> group_range_instances_json(json.loads('{"AutoScalingGroups": [{"MinSize": 1, "MaxSize": 4}]}'))
    (1, 4)
    """
    first_group = obj['AutoScalingGroups'][0]
    return first_group['MinSize'], first_group['MaxSize']
def _get_initial_states_constraints(n_mixtures, factors):
"""Enforce that the x values of the first factor are increasing.
Otherwise the model would only be identified up to the order of the start factors.
Args:
n_mixtures (int): number of elements in the mixture of normal of the factors.
factors (list): the latent factors of the model
Returns:
constraints (list)
"""
msg = (
"This constraint enforces an ordering on the initial means of the states "
"across the components of the factor distribution. This is necessary to ensure "
"uniqueness of the maximum likelihood estimator."
)
if n_mixtures > 1:
ind_tups = [
("initial_states", 0, f"mixture_{emf}", factors[0])
for emf in range(n_mixtures)
]
constr = [{"loc": ind_tups, "type": "increasing", "description": msg}]
else:
constr = []
return constr |
def is_key_translated(verbose, obj_id, key, object_json):
    """Return True when *key* is present (i.e. translated) in *object_json*;
    otherwise optionally log the miss and return False."""
    if key not in object_json:
        if verbose:
            print(f"No translation for {obj_id} string '{key}' in dump file -- skipping")
        return False
    return True
def get_gap_symbol(seq):
    """Return the gap symbol of *seq* (a cogent Sequence/ModelSequence or a
    plain string); '-' when the sequence carries no MolType."""
    try:
        return seq.MolType.Alphabet.Gap
    except AttributeError:
        # plain strings (and MolType-less sequences) fall back to '-'
        return '-'
def check_not_finished_board(board: list):
    """
    Check if skyscraper board is not finished, i.e., '?' present on the game board.
    Return True if finished, False otherwise.
    >>> check_not_finished_board(['***21**', '4?????*', '4?????*', '*?????5', \
'*?????*', '*?????*', '*2*1***'])
    False
    >>> check_not_finished_board(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
    True
    >>> check_not_finished_board(['***21**', '412453*', '423145*', '*5?3215', \
'*35214*', '*41532*', '*2*1***'])
    False
    """
    return not any('?' in row for row in board)
def create_twitch_url(streamer_name):
    """Return (channel URL, channel name) for a Twitch streamer."""
    url = "twitch.tv/" + streamer_name
    return url, streamer_name
def sub_integral_to_sint(real_val, prec):
    """
    Map a real value in [-1, 1] to a signed integer in
    [-2^(prec-1), 2^(prec-1)-1].  The single overflow case +2^(prec-1)
    (from real_val == 1.0) is clamped down to 2^(prec-1)-1 so the full
    two's-complement range is never exceeded on the positive side.
    """
    limit = 2 ** (prec - 1)
    scaled = int(round(real_val * limit))
    if scaled == limit:
        scaled -= 1
    return scaled
def _divide_bundles(bundles):
"""Take each subsegment inside a bundle and put it in its own bundle,
copying the bundle metadata."""
divided = []
for bund in bundles:
for t in bund['times']:
new_bund = bund.copy()
new_bund['times'] = [t]
divided.append(new_bund)
return divided |
def getsquarevals(grid, row, col):
    """
    Return the nine values of the 3x3 sub-square containing (row, col),
    in row-major order.
    """
    top = (row // 3) * 3
    left = (col // 3) * 3
    return [grid[r][c]
            for r in range(top, top + 3)
            for c in range(left, left + 3)]
def min_sec(sec, second_digits=1, left_pad=2):
    """Represent *sec* (float seconds) as minutes and seconds, e.g.
    62.33242 -> '1m02.3s'.

    second_digits: digits printed after the seconds' decimal point.
    left_pad: digits the integer part of the seconds is zero-padded to
    (so columns line up).  Examples:
    min_sec(1.3) == '0m01.3s'
    min_sec(13) == '0m13.0s'
    min_sec(5.3,0) == '0m05s'
    min_sec(5.3,0,0) == '0m5s'

    BUGFIX: the %-format width is the TOTAL field width, so when
    second_digits > 0 the decimal point and fraction must be counted in,
    otherwise left_pad is never applied (min_sec(1.3) gave '0m1.3s').
    """
    width = left_pad + (second_digits + 1 if second_digits > 0 else 0)
    format_string = "%%dm%%0%d.%dfs" % (width, second_digits)
    return format_string % (sec / 60., sec % 60.)
def sanitize_basisname(name):
    """Function to return *name* in coded form, stripped of
    characters that confuse filenames: lowercased, with
    ``+`` -> ``p``, ``*`` -> ``s``, and ``(``, ``)``, ``,`` -> ``_``.

    Uses a single str.translate pass instead of five chained replaces.
    """
    table = str.maketrans({'+': 'p', '*': 's', '(': '_', ')': '_', ',': '_'})
    return name.lower().translate(table)
def get_marker_indices(marker, line):
    """Find the start and end parameter markers on a template file line.
    Used by write_to_template().

    Parameters
    ----------
    marker : str
        template file marker char
    line : str
        template file line

    Returns
    -------
    (start, end) : tuple of lists
        start holds the even-numbered marker positions, end the
        odd-numbered positions plus one (zero based).
    """
    positions = [idx for idx, ch in enumerate(line) if ch == marker]
    starts = positions[0:-1:2]
    ends = [idx + 1 for idx in positions[1::2]]
    assert len(starts) == len(ends)
    return starts, ends
def calc_magtype(ev_dict):
    """Pick the magnitude value and scale for an event, preferring the
    generic 'magnitude' (with its own 'magtype'), then mb, ms, ml.
    Returns ('-', '') when no candidate is positive.
    Assumes a netmag table is present."""
    candidates = (
        ('magnitude', None),  # None -> use the event's own magtype
        ('mb', 'Mb'),
        ('ms', 'Ms'),
        ('ml', 'Ml'),
    )
    for field, scale in candidates:
        if ev_dict[field] > 0:
            mag = ev_dict[field]
            mag_sc = ev_dict['magtype'] if scale is None else scale
            return mag, mag_sc
    return '-', ""
def scp_file(file, ip, path, ssh_key=None):
    """Build (not execute) the scp command string copying *file* to ip:path,
    optionally with an identity file."""
    if ssh_key is None:
        return 'scp %s %s:%s' % (file, ip, path)
    return 'scp -i %s %s %s:%s' % (ssh_key, file, ip, path)
def get_song_info(data):
    """Get song info from a Spotify search result.

    Arguments:
        data: data structure from spotify
    Returns: (name, [artist names], uri) of the first track
    """
    track = data['tracks']['items'][0]
    artist_names = [artist['name'] for artist in track['artists']]
    return track['name'], artist_names, track['uri']
def zigzag(n):
    """Return indices for zig-zag expanding an n x n matrix, walking
    anti-diagonals and alternating direction on every second diagonal."""
    indices = []
    for diag in range(2 * n - 1):
        # valid j range on this anti-diagonal
        lo = 0 if diag < n else diag + 1 - n
        hi = diag + 1 if diag < n else n
        for j in range(lo, hi):
            if diag % 2 == 0:
                indices.append((diag - j, j))
            else:
                indices.append((j, diag - j))
    return indices
def chunk_list(lst, n):
    """Split an iterable into consecutive chunks of length n
    (the last chunk may be shorter)."""
    items = list(lst)
    return [items[start:start + n] for start in range(0, len(items), n)]
def get_config_value_by_key(config_to_get, config_key):
    """Look up a nested config value by a dash-separated key path.

    Example:
        config_to_get = {"a": "b", "c": {"d": "e", "f": "g"}}
        config_key = "c-f"  ->  "g"

    Raises KeyError when a path segment is missing.

    The original had an unreachable ``raise ValueError`` branch (the loop
    index can never exceed len-1); the walk is now a plain descend-then-read.
    """
    node = config_to_get
    *parents, leaf = config_key.split("-")
    for segment in parents:
        node = node[segment]
    return node[leaf]
def carbonblack_binaryinfo_host_observed(rec):
    """CarbonBlack BinaryInfo Host Observed Watchlist Match"""
    expected_host = 'FS-HQ'
    expected_md5 = '9E4B0E7472B4CEBA9E17F440B8CB0AB8'
    return rec['hostname'] == expected_host and rec['md5'] == expected_md5
def convert_name(names):
    """Convert 'Last, First' name(s) into the 'First+Last' form used by
    CrossRef searches; multiple names are '; '-separated in, ', '-separated out."""
    name_list = names.split('; ') if ';' in names else [names]
    converted = []
    for full_name in name_list:
        parts = full_name.split(', ')
        converted.append('+'.join(reversed(parts)))
    return ', '.join(converted)
def _get_subgroup(x):
"""Apply function to return one of eight unique subgroups"""
race, gender = x
if race == "B":
subgroup = "Black"
elif race == "H":
subgroup = "Latinx"
elif race == "A" or race == "P":
subgroup = "Asian"
else:
subgroup = "Other"
if gender == "M":
return subgroup + " Male"
elif gender == "F":
return subgroup + " Female"
else:
return subgroup + " Other" |
def Deep_Copy(content):
    """
    Return *content* itself for scalar types, or a new list holding the
    same elements for lists.

    NOTE: despite the name, list elements are shared (a shallow copy).
    Raises ValueError for any other type.
    """
    if isinstance(content, list):
        return [item for item in content]
    if isinstance(content, (int, float, type(None), str, bool)):
        return content
    raise ValueError("Unexpected type for Deep_Copy function")
def _find_letter(letter, alphabet_square):
"""helper function to find the index of a specified letter in the
alphabet square
Args:
letter (char): letter to search for
alphabet_square (list of list of str): adfgvx translation matrix
Raises:
ValueError: raised if letter is not in square
Returns:
(int, int): row, col indexes of the letter in the square
"""
for i in range(len(alphabet_square)):
for j in range(len(alphabet_square)):
if letter.upper() == alphabet_square[i][j].upper():
return i, j
raise ValueError("Your alphabet square does not contain letter in plaintext:", letter) |
def attrib2pred(s: str) -> str:
    """Convert an attribute name into its RDF predicate form: 'has' plus
    the attribute with its first letter capitalised.

    Args:
        s: the attribute
    Returns:
        str: the RDF predicate
    """
    head, tail = s[0], s[1:]
    return "has" + head.upper() + tail
def load_src_wmap(path):
    """Load a source-side word map into the module global ``src_wmap``.

    Args:
        path (string): path to the word map file (lines of "word id");
            a falsy path yields an empty map.

    Returns:
        dict: word -> id
    """
    global src_wmap
    src_wmap = {}
    if not path:
        return src_wmap
    with open(path) as f:
        for line in f:
            fields = line.strip().split()
            # first token is the word, last is its integer id
            src_wmap[fields[0]] = int(fields[-1])
    return src_wmap
def get_keys_with_subkeyval(dic: dict, subkey: str, subval: str) -> dict:
    """Return the subset of *dic* whose (dict) values map *subkey* to *subval*.

    Example
    >> dic = torch.load("mydir.pt")
    >> get_keys_with_subkeyval(my_dic, "datesize", "c828d7d9d3aafd0b70127aae84208d97")
    """
    selected = {}
    for key, value in dic.items():
        if value[subkey] == subval:
            selected[key] = value
    return selected
def is_punct(text):
    """
    Return True when *text* contains no ASCII letters or digits
    (case-insensitive); the empty string counts as punctuation.
    """
    alnum = 'abcdefghijklmnopqrstuvwxyz1234567890'
    return not any(ch in alnum for ch in text.lower())
def any_tasks_failed(tasks):
    """Return True if any rq job reports status "failed"; tasks whose job
    is no longer available in rq are skipped."""
    for task in tasks:
        job = task.get_rq_job()
        if job is not None and job.get_status() == "failed":
            return True
    return False
def nrrcus_stnid(stnid):
    """
    Number of RCUs from station ID.

    Parameters
    ----------
    stnid: str
        Station ID

    Returns
    -------
    int
        96 for core/remote ('CS'/'RS') stations, 192 for EU stations.
    """
    return 96 if stnid[:2] in ('CS', 'RS') else 192
def interpolation_recursive_search(array, value, left, right):
    """
    :param array: list of values (assumed sorted ascending)
    :param value: value to search for
    :param left: value in the left margin
    :param right: value in the right margin
    :return: index of the value
    If not found, return -1
    Time complexity: O(log(n)) on uniformly distributed data

    NOTE(review): raises ZeroDivisionError when array[left] == array[right]
    (e.g. a run of equal values) -- confirm callers guarantee distinct
    endpoint values.
    """
    # base case when recursive list minimized to empty
    if left > right:
        return -1
    # Probe position: linearly interpolate where `value` should lie between
    # the margin values (this is what distinguishes it from binary search)
    mid = left + (right - left) * (value - array[left]) // (array[right] -
                                                            array[left])
    # base case when value bigger than all number of the list
    if mid > right:
        return -1
    # Found the value
    if value == array[mid]:
        return mid
    # if value less than the middle point, recursive the left part
    if value < array[mid]:
        return interpolation_recursive_search(array, value, left, mid - 1)
    # if value bigger than the middle point, recursive the right part
    if value > array[mid]:
        return interpolation_recursive_search(array, value, mid + 1, right)
def match_channels(channels):
    """Pair channels serially as (0,1), (2,3), ...; a trailing unmatched
    channel is dropped."""
    return [(i, i + 1) for i in range(0, len(channels) - 1, 2)]
def _is_empty(mime_type):
"""
Return whether a template file is an empty file.
"""
return mime_type == 'inode/x-empty' |
def make_truthy_filter(column: str, value) -> str:
    """
    Return a SQL filter that matches if a column is truthy or falsy.

    Truthy means TRUE. Falsy means FALSE or NULL (akin to Python's
    native truthiness).  value is None returns the empty string.

    Examples
    --------
    >>> make_truthy_filter('PROJECT.trashed', True)
    'AND PROJECT.trashed'
    >>> make_truthy_filter('PROJECT.trashed', False)
    'AND NOT IFNULL(PROJECT.trashed, 0)'
    >>> make_truthy_filter('PROJECT.trashed', None)
    ''
    """
    if value is None:
        return ""
    filters = {
        True: f"AND {column}",
        False: f"AND NOT IFNULL({column}, 0)",
    }
    return filters[bool(value)]
def iso7816_4_pad(message, total_len):
    """
    ISO 7816-4 padding: append byte 0x80, then zero-fill to *total_len*.

    Raises ValueError when the message leaves no room for the 0x80 byte.
    """
    if len(message) >= total_len:
        raise ValueError(
            f'Padded message is at least {len(message) + 1} bytes long'
        )
    padded = message + b'\x80'
    return padded + b'\x00' * (total_len - len(padded))
def _get_row(testdata):
"""Get row or rownum or row_num from in testdatafile"""
keys = ["row", "row_num", "rownum"]
value = ""
for key in keys:
td_row = testdata.get(key, None)
if td_row is None:
continue
else:
value = td_row
break
return value |
def success_probability_to_polarization(s, n):
    """Map a success probability *s* of an n-qubit circuit to the
    polarization p = (s - 1/2^n) / (1 - 1/2^n)."""
    baseline = 1 / 2 ** n
    return (s - baseline) / (1 - baseline)
def removeElement(nums, val):
    """
    Remove every occurrence of *val* from *nums* in place and return
    the new length.

    :type nums: List[int]
    :type val: int
    :rtype: int

    Single O(n) pass via slice assignment instead of calling
    list.remove() once per occurrence (which was O(n^2)).
    """
    nums[:] = [x for x in nums if x != val]
    return len(nums)
def get_weight(column):
    """Sorting weight of a table column; 0 when the column has no
    ``weight`` attribute."""
    try:
        return int(column.weight)
    except AttributeError:
        # column carries no weight -> neutral sort position
        return 0
def delete_empty_fields(d: dict) -> dict:
    """
    Recursively drop None-valued keys from *d* (mutating it) and return it.
    Dicts nested directly, or inside lists, are cleaned as well.
    Mostly used to make resulting JSON dumps lighter.

    Args:
        d: The dictionary to reduce
    Returns:
        The reduced dictionary
    """
    for key in list(d):
        value = d[key]
        if value is None:
            del d[key]
        elif isinstance(value, dict):
            delete_empty_fields(value)
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, dict):
                    delete_empty_fields(element)
    return d
def parse_scale_factor(value):
    """
    Parse a UI percentage (e.g. "33") into the fractional scale used by
    the mosaic classes; fall back to 0.33 on unparsable input.

    Catches only conversion errors instead of a bare ``except`` that
    would also swallow KeyboardInterrupt and genuine programming errors.
    """
    try:
        return float(value) / 100.0
    except (TypeError, ValueError):
        return 0.33
def convert_cash(price):
    """
    Converts price string to float.
    ---
    IN
    price: dollar amount (str), or None
    OUT
    price_float: price (float), or None when price is None
    """
    # identity check is the idiomatic None test (was `price == None`)
    if price is None:
        return None
    # drop commas and dollar signs in a single translate pass
    trans = str.maketrans('', '', ',$')
    return float(price.translate(trans))
def api_url(host):
    """
    Build the dropcaches API URL for *host*.
    :param host: str
    :return: str
    """
    return f'{host}/api/dropcaches/'
def eratosthenes_sieve(n):
    """
    Sieve of Eratosthenes: all primes up to and including n.
    Complexity: O(N log log N).

    Marks every multiple of each prime <= sqrt(n) as composite (a multiple
    of a prime has that prime as an extra divisor, so cannot be prime).
    """
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(n ** 0.5) + 1):
        if not is_prime[candidate]:
            continue
        # start at candidate^2: smaller multiples are already marked
        for multiple in range(candidate * candidate, n + 1, candidate):
            is_prime[multiple] = False
    return [i for i, prime in enumerate(is_prime) if prime]
def round_dict(d, n):
    """
    Return a copy of *d* with every float value rounded to n decimal
    places; non-float values pass through unchanged.
    """
    result = {}
    for key, value in d.items():
        result[key] = round(value, n) if isinstance(value, float) else value
    return result
def _escape_badge_text(text):
    """Escape shields.io-special characters in one badge path segment."""
    return (text.replace('@', '%40')
                .replace(' ', '%20')
                .replace('-', '--')
                .replace('/', '%2F'))


def get_badge_path(label, value, color):
    """Return the expected badge filename '<label>-<value>-<color>.svg'
    with label and value escaped for shields.io-style paths.

    The duplicated four-step replace chain is factored into
    _escape_badge_text so the two escapes cannot drift apart.
    """
    return '%s-%s-%s.svg' % (_escape_badge_text(label), _escape_badge_text(value), color)
def expand_sign(l):
    """
    Expand [a, b, c] into every sign combination [+/-a, +/-b, +/-c];
    zero elements get no extra sign variant.
    """
    head, rest = l[0], l[1:]
    # positive variant first, then negative (unless the element is 0)
    head_signs = [head] if head == 0 else [head, -head]
    if not rest:
        return [[sign] for sign in head_signs]
    tails = expand_sign(rest)
    return [[sign] + tail for sign in head_signs for tail in tails]
def inverter_elementos(iteravel):
    """
    Reverse each element inside a tuple.

    :param iteravel: tuple of strings (or other sliceable sequences)
    :return: tuple with every element reversed

    Example:
    >>> inverter_elementos(("a1", "b3"))
    ('1a', '3b')
    """
    return tuple(element[::-1] for element in iteravel)
def match_sent(sentences, output_path):
    """
    Approximately reconstruct original sentences from DistilBERT-tokenised
    text: merge ' ##' word pieces, re-attach apostrophes and the common
    "did n't" contraction (capitalisation is lost).  Each sentence is
    wrapped in <span> tags, written to *output_path* (one per line), and
    the wrapped sentences are returned.
    """
    cleaned = []
    for sentence in sentences:
        sentence = sentence.replace(' ##', '')
        sentence = sentence.replace(" ' ", "'")
        sentence = sentence.replace("did n't", "didn't")
        cleaned.append('<span>' + sentence + '</span>')
    with open(output_path, 'w') as output:
        for sentence in cleaned:
            output.write(sentence + '\n')
    return cleaned
def human_format(num, precision=0):
    """
    Format a number as a string with a metric suffix (k, M, G, T, P).

    :param num: the number to format (may be negative)
    :param precision: number of positions after the decimal point
    :return: string representing the number

    BUGFIX: the loop is now bounded by the suffix list, so values of
    10^18 and beyond clamp to 'P' instead of raising IndexError.
    """
    suffixes = ['', 'k', 'M', 'G', 'T', 'P']
    magnitude = 0
    while abs(num) >= 1000 and magnitude < len(suffixes) - 1:
        magnitude += 1
        num /= 1000.0
    return '{:.{prec}f}{}'.format(num, suffixes[magnitude], prec=precision)
def number_attribute(attribute, operator, value):
    """
    Select an audience based on an attribute object with an INTEGER schema
    type, including predefined and device attributes.

    See https://docs.airship.com/api/ua/?http#schemas-numberattribute for
    the required data formatting; custom attributes must be defined in the
    Airship UI prior to use.

    Raises ValueError for an unknown operator or a non-int value.
    """
    valid_operators = ("equals", "contains", "less", "greater", "is_empty")
    if operator not in valid_operators:
        raise ValueError(
            "operator must be one of 'equals', 'contains', 'less', 'greater', 'is_empty'"
        )
    # exact type check on purpose: bools and floats are rejected
    if type(value) is not int:
        raise ValueError("value must be an integer")
    return {"attribute": attribute, "operator": operator, "value": value}
def get_r_list(area1, area2, max_area, tol=0.02):
    """
    Return [r1, r2] pairs with r1/r2 ~= area2/area1 (within *tol*,
    relative to max_area) under the constraints r1 <= max_area/area1 and
    r2 <= max_area/area2.  r1 and r2 are the supercell sizes of the two
    interfaces that align them.
    """
    rmax1 = int(max_area / area1)
    rmax2 = int(max_area / area2)
    print('rmax1, rmax2: {0}, {1}\n'.format(rmax1, rmax2))
    matches = []
    for r1 in range(1, rmax1 + 1):
        for r2 in range(1, rmax2 + 1):
            mismatch = abs(r1 * area1 - r2 * area2) / max_area
            if mismatch <= tol:
                matches.append([r1, r2])
    return matches
def normalize_b64_data(coding):
    """
    Normalize base64 bytes by padding with '=' to a multiple of 4.
    See http://bit.ly/2vxIAnC for details.

    :param coding: encoded data (bytes)
    :return: normalized encoded data
    """
    remainder = len(coding) % 4
    if remainder:
        coding += b'=' * (4 - remainder)
    return coding
def _weight(entry):
"""
Sum a power of frequency.
Word frequencies have a skew with a long tail toward infrequent.
"""
weight = 500 * entry[0] ** 0.25
for i in range(3, len(entry), 2):
weight -= (entry[i] / 100.0) ** 4 * 100
return weight |
def floyd_warshall(matrix):
    """Apply the Floyd-Warshall shortest-paths algorithm to a DBM matrix
    in place.

    Args:
        matrix: The DBM matrix (list of lists).
    Returns:
        The closed form of the DBM matrix (same object).
    """
    n = len(matrix)
    for k in range(n):
        for i in range(n):
            for j in range(len(matrix[0])):
                if i == j:
                    continue  # diagonal entries are left untouched
                candidate = matrix[i][k] + matrix[k][j]
                if candidate < matrix[i][j]:
                    matrix[i][j] = candidate
    return matrix
def getVersionString(plist, key=None):
    """Gets a version string from the plist.

    If a key is explicitly specified, the value of that key is returned
    without modification, or an empty string if the key does not exist.

    If key is not specified:
        if there's a valid CFBundleShortVersionString, returns that;
        else if there's a CFBundleVersion, returns that;
        else returns an empty string.

    "Valid" means the value (up to the first space, so '1.0 (100)' works)
    starts with a digit; Adobe-style commas are normalised to periods.
    """
    if key:
        # admin has specified a specific key: return value verbatim
        return plist.get(key, '')
    # default to CFBundleShortVersionString plus magic and workarounds
    key = 'CFBundleShortVersionString'
    if 'CFBundleShortVersionString' not in plist and \
            'Bundle versions string, short' in plist:
        # workaround for broken Composer packages where the key is
        # misnamed 'Bundle versions string, short'
        key = 'Bundle versions string, short'
    if plist.get(key):
        version = plist[key].split()[0]
        if version[0] in '0123456789':
            # starts with a number; normalise Adobe's commas to periods
            return version.replace(',', '.')
    if plist.get('CFBundleVersion'):
        # no CFBundleShortVersionString, or a bad one; a future version of
        # the Munki tools may drop this magic and require admins to choose
        # CFBundleVersion explicitly.
        # BUGFIX: dropped the Python-2-era .encode('utf-8'): under
        # Python 3 it produced bytes, so `bytes[0] in '0123456789'`
        # raised TypeError.
        version = plist['CFBundleVersion'].split()[0]
        if version[0] in '0123456789':
            return version.replace(',', '.')
    return ''
def remove_quotes(string):
    """
    Strip at most one leading and one trailing double quote from a path.

    :param string: path, possibly wrapped in double quotes
    :return: path without the wrapping quotes
    """
    start = 1 if string.startswith('"') else 0
    end = -1 if string.endswith('"') else None
    return string[start:end]
def fetchone(cursor):
    """
    Fetch one row from the given cursor as a dict keyed by column name.

    Params:
        cursor (Cursor): cursor of a previously executed statement
    Returns:
        dict: the selected record indexed by column name; {} when the
        cursor is falsy or no row is available.

    BUGFIX: when fetchone() returns None (no rows left) the original
    crashed inside zip(); now it returns {} instead.
    """
    if not cursor:
        return {}
    row = cursor.fetchone()
    if row is None:
        return {}
    return dict(zip((d.name for d in cursor.description), row))
def custom_props(device_id, run_id=None):
    """Build the customDimensions payload added to logger calls at
    execution time; runId is included only when truthy."""
    dimensions = {"deviceId": device_id}
    if run_id:
        dimensions["runId"] = run_id
    return {"custom_dimensions": dimensions}
def get_formatted_name(first_name, last_name):
    """Return a full name, neatly title-cased."""
    return ' '.join((first_name, last_name)).title()
def convert_sqr(sqr):
    """
    Convert a flat square index (0..63) to 2-D indices (0..7, 0..7),
    flipping the row so the array reads like a chess board.
    """
    rank, file = divmod(sqr, 8)
    return 7 - rank, file
def check_bar_match(old_bar, new_bar):
    """Check if two bars belong in the same collection (bar chart).

    Bars match when orientation and facecolor agree and, along the bar
    axis, their sizes are equal within tolerance and they share a baseline.

    Positional arguments:
    old_bar -- a previously sorted bar dictionary.
    new_bar -- a new bar dictionary that needs to be sorted.
    """
    tests = [
        new_bar['orientation'] == old_bar['orientation'],
        new_bar['facecolor'] == old_bar['facecolor'],
    ]
    if new_bar['orientation'] == 'v':
        new_width = new_bar['x1'] - new_bar['x0']
        old_width = old_bar['x1'] - old_bar['x0']
        # BUGFIX: compare |difference|; the unsigned form accepted ANY
        # new bar narrower than the old one.
        tests.append(abs(new_width - old_width) < 0.000001)
        tests.append(new_bar['y0'] == old_bar['y0'])
    elif new_bar['orientation'] == 'h':
        new_height = new_bar['y1'] - new_bar['y0']
        old_height = old_bar['y1'] - old_bar['y0']
        tests.append(abs(new_height - old_height) < 0.000001)
        tests.append(new_bar['x0'] == old_bar['x0'])
    return all(tests)
def ttfAutohintDict(parameterValue):
    """Parse a TTFAutohint parameter string ("--opt=val --flag ...") into a
    dict; bare flags map to None.  Keys are stripped of spaces and dashes."""
    options = {}
    for chunk in parameterValue.split("--"):
        if "=" in chunk:
            key, val = chunk.split("=")
            val = val.strip()
        else:
            key, val = chunk, None
        if key:
            options[key.strip(" -")] = val
    return options
def cut_to_length(text, length, delim):
    """Truncate *text* at the first *delim* occurring at or after *length*
    characters; return it unchanged when no such delimiter exists."""
    position = text.find(delim, length)
    return text if position < 0 else text[:position]
def modify_string(string, part2=False):
    """
    Rewrite the expression string so each ASCII digit is wrapped by the
    number class (NumType1 or NumType2) that defines the desired operator
    precedence; every other character passes through unchanged.
    """
    wrapper = 'NumType2' if part2 else 'NumType1'
    return ''.join(
        wrapper + '(' + char + ')' if char in '0123456789' else char
        for char in string
    )
def list_on_key(dict_list, key):
    """Collect *key*'s value from each dict in *dict_list*.

    (The one-liner the original docstring was asking for.)
    """
    return [entry[key] for entry in dict_list]
def stringinlist(s, l):
    """Return True when *s* occurs in any string of *l*.

    NOTE: this is substring containment (`s in item`), not equality.
    """
    return any(s in candidate for candidate in l)
def testIfSumIsDivisable(numA=673, numB=909, div=3):
    """
    task 0.5.3
    Return True when the sum of the two numbers is divisible by the
    given divisor.
    """
    total = numA + numB
    return total % div == 0
def row_index(s_i: int) -> int:
    """Row index [0..8] on the 9x9 board for flat state index [0..80]."""
    block_row = s_i // 27
    inner_row = (s_i % 9) // 3
    return 3 * block_row + inner_row
def _dms2dd(d, m, s, dir):
"""
convert lat/lon degrees/minutes/seconds into decimal degrees
"""
degrees = float(d) + (float(m) / 60) + (float(s) / 3600)
if dir in ['S', 'W']:
degrees = degrees * -1
return degrees |
def conj(coll, to_add):
    """
    Similar to clojure's function, add items to a list or dictionary
    See https://clojuredocs.org/clojure.core/conj for more reading
    Returns a new collection with the to_add 'added'. conj(None, item) returns
    (item). The 'addition' may happen at different 'places' depending on the
    concrete type. if coll is:
    [] - appends [1, 2, 3, 4] == conj([1, 2], [3, 4])
    () - prepend in reverse ((4, 3, 1, 2) == conj((1, 2), (3, 4))
    {} - appends {'a': 'A', 'b': 'B'} == conj({'a':'A'}, {'b':'B'})
    Parameters:
        coll: collection to add items to
        to_add: items to be added to coll
    Return:
        object of the same type as coll but with to_add items added
    """
    if coll is None:
        return to_add
    if isinstance(coll, list):
        return coll + to_add
    if isinstance(coll, tuple):
        # Prepending each new item at index 0 leaves to_add reversed
        # at the front, matching the documented (4, 3, 1, 2) example.
        merged = list(coll)
        for item in to_add:
            merged.insert(0, item)
        return tuple(merged)
    if isinstance(coll, dict):
        # Existing keys win: only keys absent from coll are taken
        # from to_add.
        merged = dict(coll)
        for key in to_add:
            merged.setdefault(key, to_add[key])
        return merged
    # Unsupported types are returned untouched.
    return coll
def mps3_bin_names(application_note: int):
    """
    Returns expected binary names for the executable built
    for Cortex-M55 or Cortex-M55+Ethos-U55 targets in the
    form of a dict with index and name.  Only application
    note 540 is recognised; anything else yields {}.
    """
    if application_note != 540:
        return {}
    return {
        0: "itcm.bin",
        1: "dram.bin",
    }
def find_rank(wr):
    """
    Return the list of ranks for the solution kappa.
    Parameter:
    wr -- list of tuples whose second element is 'wr1' or 'wr2'
    Return:
    (rank, wr) -- list of integer ranks and the unchanged input
    """
    # Empty input: single default rank.
    if not wr:
        return [1], wr
    # Net count of rank-increment ('wr1') minus rank-decrement ('wr2')
    # entries fixes the starting rank.
    net = 0
    for entry in wr:
        if entry[1] == 'wr1':
            net += 1
        elif entry[1] == 'wr2':
            net -= 1
    rank = [1 + net]
    # Walk the entries: 'wr1' steps the rank down, 'wr2' steps it up.
    for entry in wr:
        if entry[1] == 'wr1':
            rank.append(rank[-1] - 1)
        elif entry[1] == 'wr2':
            rank.append(rank[-1] + 1)
    return rank, wr
def deepReverse(L):
    """Return a copy of L reversed at every nesting level."""
    result = []
    for element in L:
        # Nested lists are themselves deep-reversed before prepending.
        item = deepReverse(element) if isinstance(element, list) else element
        result.insert(0, item)
    return result
def _get_channel_num(port_grp):
""" Return a channel number given a port group. The lowest value
port number in the group is returned. No checks are made to insure
that all ports are in the same chassis.
Args:
port_group: (tuple or list of str representing port numbers
of the form 'n' or 'm/n' or 'ethm/n' or similar
"""
return min([int(port_grp[i].rpartition('/')[-1])
for i in range(len(port_grp))]) |
def evaluateString(a_string):
    """Assumes a_string is a string that consists of numerical values, operators
    and parentheses, and ends with '='
    returns a numeric, the evaluation of a_string"""
    # Drop the trailing '=' terminator before evaluating.
    expression = a_string[:-1]
    # SECURITY: eval() executes arbitrary Python code. Never call this
    # with untrusted or user-supplied input; no validation is performed.
    return eval(expression)
def merge_dictionaries(base_dict: dict, in_dict: dict) -> dict:
    """
    Update base_dict with values from in_dict for every key the two share,
    and return base_dict.  Keys that exist only in in_dict are ignored: the
    base_dict is treated as the complete set of valid keys, and in_dict as a
    (possibly partial) set of overrides.

    NOTE: base_dict is mutated in place; the returned object is base_dict
    itself, not a copy.

    :param base_dict: Complete dictionary of key-value pairs (mutated).
    :param in_dict: Subset of key-value pairs whose values take precedence.
    :return: base_dict, after the shared keys have been overwritten.
    """
    # dict.keys() views support set intersection, giving exactly the
    # shared keys without the 'k in d.keys()' anti-pattern.
    for key in base_dict.keys() & in_dict.keys():
        base_dict[key] = in_dict[key]
    return base_dict
def signedint2bin(x, N=16):
    """ Return the N-bit two's-complement binary representation of the
    integer x as a string of '0'/'1' characters.

    Fix: negative values were previously reduced modulo 2**16 regardless
    of N, producing wrong results for any N > 16 (e.g. -1 with N=32
    yielded sixteen zeros followed by sixteen ones).  Reducing modulo
    2**N is correct for every width.
    """
    # For x >= 0 this keeps the low N bits; for x < 0 it produces the
    # two's-complement encoding in N bits.
    value = int(x) % (1 << N)
    return format(value, '0{}b'.format(N))[-N:]
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
Taken from: https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html#post-training-static-quantization
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v |
def get_id_from_playlist_url(url: str) -> str:
    """Return the playlist id of a Spotify playlist given
    a Spotify playlist url.
    Raise a ValueError if no such id can be found.
    >>> get_id_from_playlist_url('https://open.spotify.com/playlist/37i9dQZF1DWXT8uSSn6PRy?si=UutGRn1YR3CGl1Tw8WhHpQ')
    '37i9dQZF1DWXT8uSSn6PRy'
    """
    marker = 'playlist/'
    marker_pos = url.find(marker)
    # Previously a missing marker produced a garbage slice instead of
    # raising; fail explicitly.
    if marker_pos == -1:
        raise ValueError
    start = marker_pos + len(marker)
    # Search for the query separator only after the id starts; a url
    # with no '?' previously sliced to -1 and dropped the id's last char.
    end = url.find('?', start)
    playlist_id = url[start:end] if end != -1 else url[start:]
    if not playlist_id:
        raise ValueError
    return playlist_id
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.