content
stringlengths 42
6.51k
|
|---|
def sstfbfiles(date):
    """Observation file locator stub (user-specified).

    :param date: The verification date in string format ``'%Y%m%d'``
        (unused by this stub).
    :returns: Hard-coded list containing the single file ``'sstfb.nc'``.
    """
    return ['sstfb.nc']
|
def get_list_of_digits(number: int) -> list:
    """Return the decimal digits of ``number``, most-significant first.

    Fixes two defects of the original: ``0`` now yields ``[0]`` instead of
    an empty list, and negative numbers no longer loop forever (their
    magnitude is used, since the sign carries no digit information).

    :param number: integer whose digits are wanted
    :return: list of single-digit ints, most-significant first
    """
    number = abs(number)  # original looped forever for negatives
    if number == 0:
        return [0]
    digits = []
    while number:
        number, last_digit = divmod(number, 10)
        digits.append(last_digit)
    return digits[::-1]
|
def basin_extent(string):
    """Return the stored extent bounds for the named basin.

    :param string: basin name key (currently only ``'indus'``)
    :return: list of four extent values; raises KeyError for unknown names
    """
    extents = {'indus': [40, 65, 25, 85]}
    return extents[string]
|
def str_lst_num(string, seperator_key, int_bool):
    """Split a string and convert each piece to a number.

    The original duplicated the entire split/convert loop for the int and
    float cases; this collapses both into one comprehension with the
    converter chosen up front.

    :param string: the string that is going to be parsed
    :param seperator_key: the separator to split on
    :param int_bool: if truthy convert pieces to ``int``, else to ``float``
    :return: list of converted numbers
    """
    convert = int if int_bool else float
    return [convert(piece) for piece in string.split(seperator_key)]
|
def pace_str_to_secs(pace_str):
    """Convert a colon-separated pace string to seconds.

    Accepts ``'H:M:S'`` or ``'M:S'``; returns a float number of seconds.
    """
    parts = [float(part) for part in pace_str.split(':')]
    if len(parts) == 3:
        return parts[0] * 3600 + parts[1] * 60 + parts[2]
    return parts[0] * 60 + parts[1]
|
def create_preference_branch(this, args, callee):
    """Create a preference branch, used for testing composed preference
    names. Returns None when there are no args or the first arg is not a
    literal."""
    if not args:
        return
    first_arg = args[0]
    if not first_arg.is_literal:
        return
    branch = this.traverser.wrap().query_interface('nsIPrefBranch')
    branch.hooks['preference_branch'] = first_arg.as_str()
    return branch
|
def get_by_name(yaml, ifname):
    """Return ``(ifname, tunnel)`` for the named VXLAN if present in the
    config mapping, otherwise ``(None, None)``."""
    try:
        tunnel = yaml["vxlan_tunnels"][ifname]
    except KeyError:
        return None, None
    return ifname, tunnel
|
def split_to_chunks(x, y, max_chunk_size, overlapping_size):
    """Partition parallel sequences into overlapping chunks.

    Each chunk holds at most ``max_chunk_size`` points and shares
    ``overlapping_size`` points with its neighbour.

    :param x: feature sequence of length n
    :param y: label sequence of length n (sliced in lockstep with x)
    :param max_chunk_size: maximum chunk length
    :param overlapping_size: number of points shared between consecutive chunks
    :return: list of ``(x_i, y_i)`` tuples, one per chunk
    """
    chunks = []
    total = len(x)
    start = 0
    while True:
        end = min(start + max_chunk_size, total)
        chunks.append((x[start:end], y[start:end]))
        if end >= total:
            return chunks
        start = end - overlapping_size
|
def list_strip(seq):
    """Remove ``None`` and ``''`` from both ends of a list or tuple.

    Improvements over the original: ``isinstance`` instead of the
    ``type(...) in [list, tuple]`` anti-pattern (also accepts subclasses),
    a message on the TypeError, and the two trimming loops collapsed into
    single-condition ``while`` loops.

    Args:
        seq (list or tuple): sequence to trim.

    Returns:
        list: trimmed copy (the input is never mutated).

    Raises:
        TypeError: if ``seq`` is not a list or tuple.
    """
    if not isinstance(seq, (list, tuple)):
        raise TypeError("list_strip expects a list or tuple")
    trimmed = list(seq)  # always work on a copy
    while trimmed and (trimmed[0] is None or trimmed[0] == ''):
        trimmed.pop(0)
    while trimmed and (trimmed[-1] is None or trimmed[-1] == ''):
        trimmed.pop()
    return trimmed
|
def _sabr_implied_vol_hagan_A11_fhess_by_strike(
underlying, strike, maturity, alpha, beta, rho, nu):
"""_sabr_implied_vol_hagan_A11_fhess_by_strike
See :py:func:`_sabr_implied_vol_hagan_A11`.
:param float underlying:
:param float strike:
:param float maturity:
:param float alpha: must be within :math:`[0, 1]`.
:param float beta: must be greater than 0.
:param float rho: must be within :math:`[-1, 1]`.
:param float nu: volatility of volatility. This must be positive.
:return: value of factor.
:rtype: float.
"""
one_minus_beta_half = (1.0 - beta) / 2.0
one_plus_beta_half = (1.0 + beta) / 2.0
three_plus_beta_half = (3.0 + beta) / 2.0
factor = -(one_minus_beta_half
* (underlying ** one_minus_beta_half)
* one_plus_beta_half)
return factor * (strike ** (-three_plus_beta_half))
|
def get_offset_from_var(var):
    """Helper for get_variable_sizes: compute a variable's stack offset
    from its textual declaration.

    e.g. ``var_90`` -> 144, ``__saved_edi`` -> -1.

    Fix: the original used a bare ``except:``; only ``ValueError`` (a
    non-hex offset string) is expected from ``int(..., 16)`` here.

    :param var: declaration string, e.g. ``"int var_90;"``
    :return: ``(offset, is_ssa_instance)`` — offset is -1 for non-stack vars
    """
    instance = False
    # Token after the last space, minus the trailing character (e.g. ';').
    token = var[var.rfind(' ') + 1:-1]
    if token[:1] == 'v':
        token = token[4:]  # drop the 'var_' prefix
        underscore = token.find('_')
        # SSA var instances look like var_14_1; keep only the hex offset.
        if underscore != -1:
            token = token[:underscore]
            instance = True
        try:
            offset = int(token, 16)
        except ValueError:
            offset = -1  # not a parseable hex offset
    else:
        offset = -1  # non vars (e.g. __saved_edi)
    return offset, instance
|
def get_match_patterns(
    match=None,
    ignore=None,
    ignore_extensions=None,
    ignore_hidden=False,
):
    """Compose a de-duplicated list of ".gitignore style" match patterns.

    # Arguments
        match: Optional[List[str]] - patterns for files to *include*.
            Default `None` behaves like `['*']` (everything included).
        ignore: Optional[List[str]] - patterns for files to *ignore*.
            Default `None` (nothing ignored).
        ignore_extensions: Optional[List[str]] - file extensions to ignore;
            shorthand for `ignore=['*.<ext>', ...]`. Default `None`.
        ignore_hidden: bool - if `True`, also ignore hidden files and
            directories (`['.*', '.*/']`). Default `False`.
    """
    include = ['*'] if match is None else list(match)
    exclude = [] if ignore is None else list(ignore)
    extensions = [] if ignore_extensions is None else list(ignore_extensions)
    if ignore_hidden:
        exclude.extend(['.*', '.*/'])
    for extension in extensions:
        if not extension.startswith('.'):
            extension = '.' + extension
        exclude.append('*' + extension)
    combined = include + ['!' + pattern for pattern in exclude]
    # Deduplicate while preserving first-seen order.
    seen = set()
    unique = []
    for pattern in combined:
        if pattern not in seen:
            seen.add(pattern)
            unique.append(pattern)
    return unique
|
def set_xhr_breakpoint(url: str) -> dict:
    """Build the CDP command that sets a breakpoint on XMLHttpRequest.

    Parameters
    ----------
    url: str
        Resource URL substring; all XHRs containing it are stopped upon.
    """
    params = {"url": url}
    return {"method": "DOMDebugger.setXHRBreakpoint", "params": params}
|
def akaike(LnL, k):
    """Akaike Information Criterion: ``2k - 2*ln(L)``.

    :param LnL: max log-likelihood of the model
    :param k: number of estimated parameters in the model
    :return: the AIC value
    """
    penalty = 2 * k
    fit_term = 2 * LnL
    return penalty - fit_term
|
def ideal_share(share, total_shares, total_alloc):
    """Calculate the ideal share of proportions and totals.

    Parameters
    ----------
    share : int
        The proportion to be checked.
    total_shares : int
        The total amount of shares.
    total_alloc : int
        The number of allocations to provide.

    Returns
    -------
    float
        The ideal share that would be allocated.
    """
    proportion = 1.0 * share / total_shares
    return proportion * total_alloc
|
def findNextBracket(text, bracket, pos):
    """Find the first occurrence of ``bracket`` at or after ``pos`` that is
    not escaped by a preceding backslash.

    Returns its position in ``text``, or ``None`` if not found.
    """
    while True:
        found = text.find(bracket, pos)
        if found == -1:
            return None
        # Position 0 can never be escaped; otherwise check the previous char.
        if found == 0 or text[found - 1] != "\\":
            return found
        pos = found + 1
|
def check_for_comment_block(line, file_type):
    """Return True if ``line`` opens a multi-line comment block that is
    not closed on the same line, for the given ``file_type``.

    Fixes two defects:
    * py branch: the original tested ``'\"\"\"' not in line`` on a line
      known to start with ``\"\"\"``, so it always returned False; the
      closing quotes must be searched *after* the opener.
    * jsp branch: JSP comments close with ``--%>``, not ``%-->``.

    :param line: source line to inspect (leading/trailing space ignored)
    :param file_type: file extension string, e.g. ``"py"``, ``"java"``
    :return: bool for known file types, None for unknown ones
    """
    line = line.strip()
    if file_type == "py":
        # Opener with no closing triple-quote after it (and more than the
        # bare quotes themselves).
        return line.startswith('"""') and '"""' not in line[3:] and len(line) > 3
    if file_type in ["java", "js", "jsx", "c", "cpp", "cxx", "h"]:
        return line.startswith("/*") and "*/" not in line
    if file_type in ["jsp"]:
        return line.startswith("<%--") and not line.endswith("--%>")
|
def lwb_competitive(inverse_substrate: float, vmax: float = 1., km: float = 5.,
                    ki: float = 5., conc_i: float = 5) -> float:
    """Lineweaver-Burk value 1/v0 for a competitive inhibitor.

    Computes ``slope * (1/[S]) + intercept`` with
    ``slope = km * (1 + [I]/ki) / vmax`` and ``intercept = 1/vmax``.
    """
    slope = (km * (1 + (conc_i / ki))) / vmax
    intercept = 1 / vmax
    return slope * inverse_substrate + intercept
|
def percent_to_decimal(var):
    """Convert percent-form values (>= 1) to decimal form.

    Accepts a single number or a list/tuple of numbers; sequences always
    come back as a new list. Values already below 1 are passed through.
    """
    if isinstance(var, (list, tuple)):
        return [value / 100 if value >= 1 else value for value in var]
    return var / 100 if var >= 1 else var
|
def mean_list_elements(in_list):
    """Return the arithmetic mean of the list elements as a float.

    e.g. ``[2, 3, 5] -> 3.333...``. Raises ZeroDivisionError for an empty
    list; for array work prefer ``numpy.mean``.
    """
    total = sum(in_list)
    return 1.0 * total / len(in_list)
|
def combat(health, damage):
    """Return remaining health after taking damage, floored at zero."""
    return max(health - damage, 0)
|
def clamp(val, limit):
    """Limit a value to the symmetric range ``[-limit, +limit]``.

    Args:
        val: Value to be adjusted.
        limit: Bound on the absolute value of the result.

    Returns:
        ``val`` clamped into ``[-limit, +limit]``.
    """
    return max(-limit, min(val, limit))
|
def get_expected_output_files_dict(bam_base_out, report_base_out):
    """Helper method building the expected-output-paths dictionary.

    :param bam_base_out: Name pattern of BAM associated files without
        extension, e.g. '/path/to/step_part' for '/path/to/step_part.bam'.
    :type bam_base_out: str
    :param report_base_out: Name pattern of report associated files without
        extension, e.g. '/path/to/step_report'.
    :return: dict mapping logical keys to the expected BAM/report paths.
    """
    expected = {}
    for key, extension in (
        ("bam", ".bam"),
        ("bam_bai", ".bam.bai"),
        ("bam_bai_md5", ".bam.bai.md5"),
        ("bam_md5", ".bam.md5"),
    ):
        expected[key] = bam_base_out + extension
    for key, extension in (
        ("report_bamstats_html", ".bam.bamstats.html"),
        ("report_bamstats_html_md5", ".bam.bamstats.html.md5"),
        ("report_bamstats_txt", ".bam.bamstats.txt"),
        ("report_bamstats_txt_md5", ".bam.bamstats.txt.md5"),
        ("report_flagstats_txt", ".bam.flagstats.txt"),
        ("report_flagstats_txt_md5", ".bam.flagstats.txt.md5"),
        ("report_idxstats_txt", ".bam.idxstats.txt"),
        ("report_idxstats_txt_md5", ".bam.idxstats.txt.md5"),
    ):
        expected[key] = report_base_out + extension
    return expected
|
def assign_bonds_to_groups(tors, group):
    """Assign each torsion bond to a group and find the biggest group.

    **Input:**
    - tors: bonds with torsions, each indexable as (atom_a, atom_b)
    - group: per-atom group numbers (atoms grouped by proximity)

    **Output:**
    - output: list of group numbers, one per torsion bond
    - big_group: index of the group with the most assigned bonds
      (-1 if no bond landed in a group)
    - nbig_group: number of bonds in that biggest group
    """
    ngroup = max(group)
    member_counts = [0] * (ngroup + 1)
    output = []
    for bond in tors:
        # A bond belongs to the higher-numbered group of its two atoms.
        group_number = max(group[bond[0]], group[bond[1]])
        output.append(group_number)
        if group_number >= 0:
            member_counts[group_number] += 1
    big_group = -1
    nbig_group = 0
    for idx, count in enumerate(member_counts):
        if count > nbig_group:
            nbig_group = count
            big_group = idx
    return output, big_group, nbig_group
|
def round_decimal_to_str(value, to=3):
    """Round a decimal value and render it as a string.

    @param value : value to round; ``None`` yields the placeholder ``'-'``
    @param to : how many decimal points to round to
    """
    if value is None:
        return '-'
    return str(round(float(value), to))
|
def filter_stream(streams, excludes):
    """Prune sensors/streams returned by OOI Net using exclusion keywords.

    :param streams: list of sensor or stream names
    :param excludes: keywords; any stream containing one is dropped
    :return: a cleaned, pruned list
    """
    return [
        stream for stream in streams
        if not any(keyword in stream for keyword in excludes)
    ]
|
def fibonacci_memoization(n):
    """Memoized fibonacci. O(n) runtime, O(n) stack frames, O(n) space.

    Fix: the original indexed ``d[1]`` / ``d[2]`` unconditionally, raising
    IndexError for n < 2; small n are now answered directly (fib(0)=0,
    fib(1)=1).

    :param n: non-negative index into the Fibonacci sequence
    :return: the n-th Fibonacci number
    :raises ValueError: if n is negative
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    if n < 2:
        return n
    d = [0] * (n + 1)
    d[1] = 1
    d[2] = 1
    def fibonacci_memoization_helper(k):
        # 0 marks "not yet computed" (valid because fib(k) > 0 for k >= 1).
        if d[k] == 0:
            d[k] = fibonacci_memoization_helper(k - 1) + fibonacci_memoization_helper(k - 2)
        return d[k]
    return fibonacci_memoization_helper(n)
|
def split_haiku(haiku):
    """Lowercase a haiku, drop periods, and split it into words."""
    return haiku.lower().replace('.', '').split()
|
def format_CALL_FUNCTION_pos_name_encoded(argc):
    """Describe CALL_FUNCTION's encoded positional and named arg counts.

    Used up to about Python 3.6 where wordcodes changed the encoding;
    PyPy 3.6 sticks to this encoded version. Low byte is the positional
    count, next byte the named count.
    """
    positional = argc & 0xFF
    named = (argc >> 8) & 0xFF
    return "%d positional, %d named" % (positional, named)
|
def indexing(pair):
    """Expand a (user, followed-users) pair into index pairs.

    For a pair ``(X, Fs)`` where X follows each Fi in Fs, returns the flat
    list ``[(F1, X), (X, -F1), (F2, X), (X, -F2), ...]``.

    :param pair: ``(X, Fs)`` with Fs an iterable of followed user names
    :return: list of ``(Fi, X)`` and ``(X, '-' + Fi)`` pairs
    """
    user, followees = pair
    pairs = []
    for followee in followees:
        pairs.append((followee, user))
        pairs.append((user, '-' + followee))
    return pairs
|
def getPositionPdf(i, nb_col):
    """Return the [row, column] position of square ``i`` on the pdf page."""
    row = int(i / nb_col)
    column = i % nb_col
    return [row, column]
|
def get_rel_path(path, base):
    """Get the path relative to ``base``.

    e.g. ``get_rel_path('abc/de/fg', 'abc') => 'de/fg'``.

    Fix: input validation used ``assert``, which is stripped under ``-O``;
    it now raises ValueError so the precondition always holds.

    :param path: full path that must start with ``base``
    :param base: prefix to remove
    :return: remainder of ``path`` after ``base`` and any one separator
    :raises ValueError: if ``path`` does not start with ``base``
    """
    if not path.startswith(base):
        raise ValueError("path %r does not start with base %r" % (path, base))
    rel_path = path[len(base):]
    # Drop a single leading separator so 'abc/de' -> 'de', but keep the
    # raw remainder for prefixes that end mid-component ('abcd' -> 'd').
    if rel_path.startswith('/'):
        rel_path = rel_path[1:]
    return rel_path
|
def stringify(value):
    """Return the string representation of ``value``.

    ``None`` maps to ``'null'``; booleans are matched by identity so that
    the integers 0/1 still fall through to ``str``.
    """
    if value is None:
        return 'null'
    if value is True:
        return 'True'
    if value is False:
        return 'False'
    return str(value)
|
def get_all_uris(arr):
    """Collect every item's ``'uri'`` from ``arr``.

    Args:
        arr: Mapping with an ``'items'`` list of dicts, each holding a
            ``'uri'`` key.

    Return:
        List of all uris, in item order.
    """
    return [item['uri'] for item in arr['items']]
|
def _choose_gqa(additional_files):
"""
Choose the latest GQA in a list of files (or none if there aren't any).
:type additional_files: tuple[pathlib.Path]
:rtype: pathlib.Path or None
>>> from pathlib import Path
>>> files = (
... Path('additional/20141201_19991029_B6_gqa_results.yaml'),
... Path('additional/20141201_20000321_B6_gqa_results.yaml')
... )
>>> str(_choose_gqa(files))
'additional/20141201_20000321_B6_gqa_results.yaml'
>>> str(_choose_gqa(files[:1]))
'additional/20141201_19991029_B6_gqa_results.yaml'
>>> _choose_gqa(())
"""
gqa_files = [f for f in additional_files if f.name.endswith('gqa_results.yaml')]
if not gqa_files:
return None
newest_first = list(sorted(gqa_files, reverse=True))
return newest_first[0]
|
def reverse_words(text):
    """Reverse each space-separated word of ``text``, keeping all spaces.

    :param text: string of words.
    :return: the string with every word reversed in place.
    """
    reversed_words = [word[::-1] for word in text.split(" ")]
    return " ".join(reversed_words)
|
def nominal_rate(kl, ne):
    """Scale the nominal migration rate by the nonlocal-effects factor."""
    scaling = 1 + ne
    return kl * scaling
|
def NonsfiLoaderArch(target):
    """Map a target name to the nonsfi_loader arch.

    Raises KeyError for unknown targets.
    """
    return {
        'arm32': 'arm',
        'x8632': 'x86-32',
        'mips32': 'mips32',
    }[target]
|
def index6(i):
    """Convert a flat index (0..5) into the symmetric-tensor (row, col) pair."""
    pairs = ((0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2))
    return pairs[i]
|
def identify_tracks_to_add(client, source_playlist, destination_playlist):
    """Collect URIs of source-playlist tracks missing from the destination.

    :param client: API client exposing ``get_playlist_tracks`` and
        ``search_playlist``
    :param source_playlist: playlist to pull candidate tracks from
        (None yields an empty result)
    :param destination_playlist: playlist checked for presence
    :return: list of track URIs not yet in ``destination_playlist``
    """
    if source_playlist is None:
        return []
    tracks_to_add = []
    for entry in client.get_playlist_tracks(source_playlist):
        already_present = client.search_playlist(entry['track']['id'], destination_playlist)
        if not already_present:
            tracks_to_add.append(entry['track']['uri'])
    return tracks_to_add
|
def _segment_less_than(a: str, b: str) -> bool:
"""Return True if a is logically less that b."""
max_len = max(len(a), len(b))
return a.rjust(max_len) < b.rjust(max_len)
|
def _fragmentation(data_size, disk_size):
"""Think of the fragmentation metric is that it's
a measure of the % of the database or view that's used
to store old documents and their associated metadata.
See
https://wiki.apache.org/couchdb/Compaction
and
http://docs.couchdb.org/en/latest/config/compaction.html#compaction-daemon-rules
for details on fragmentation calculation.
"""
if data_size is None or disk_size is None:
return None
fragmentation = ((disk_size - float(data_size)) / disk_size) * 100.0
return int(round(fragmentation, 0))
|
def findEndOfLine(text,offset,lineLength=None):
    """Searches forwards through the given text from the given offset, until it finds the offset that is the start of the next line. With out a set line length, it searches for new line / cariage return characters, with a set line length it simply moves forward to sit on a multiple of the line length.
	@param text: the text to search
	@type text: str
	@param offset: the offset of the text to start at
	@type offset: int
	@param lineLength: The number of characters that makes up a line, None if new line characters should be looked at instead
	@type lineLength: int or None
	@return: the found offset
	@rtype: int
	"""
	if not text:
		return 0
	# Clamp offset onto the last character so the indexing below is safe.
	if offset>=len(text):
		offset=len(text)-1
	if isinstance(lineLength,int):
		# Fixed-width lines: jump to the next multiple of lineLength.
		return (offset-(offset%lineLength)+lineLength)
	end=offset
	# If we are already sitting on '\n', the next line starts right after it.
	if text[end]!='\n':
		end=text.find('\n',offset)
	if end<0:
		# No '\n' found; fall back to '\r'. NOTE(review): '\r' is only
		# searched when the character at the original offset is not itself
		# '\r' — confirm this asymmetry against callers.
		if text[offset]!='\r':
			end=text.find('\r',offset)
		if end<0:
			# No line terminator at all: next line starts past the text end.
			end=len(text)-1
	return end+1
|
def split_data_list(list_data, num_split):
    """Split a list into ``num_split`` chunks of equal floor size.

    Any remainder elements are appended to the last chunk. Progress is
    printed, matching the original diagnostics.
    """
    total = len(list_data)
    per_worker = total // num_split
    print("num_data_all: %d" % total)
    splits = []
    start = 0
    for _ in range(num_split):
        splits.append(list_data[start:start + per_worker])
        start += per_worker
    # Sweep leftover items (when total is not divisible) into the last chunk.
    if start < total:
        splits[-1].extend(list_data[start:])
    counts = [len(chunk) for chunk in splits]
    print("list_files split: {}".format(counts))
    return splits
#
|
def exp(level):
    """The amount of EXP required to achieve a certain level."""
    level_bonus = level + 5
    return 5 * level * level_bonus
|
def is_common_pass(astring):
    """
    (str) -> Boolean

    Return False when ``astring`` is one of the known common passwords,
    True otherwise. The common passwords checked are: 'password', '12345',
    'qwerty', 'letmein', 'trustno1', '000000', 'passw0rd'.

    >>> is_common_pass('passwordup')
    True
    >>> is_common_pass('qwerky')
    True
    >>> is_common_pass('qwerty')
    False
    """
    common = ('password', '12345', 'qwerty', 'letmein', 'trustno1', '000000', 'passw0rd')
    return astring not in common
|
def findDuplicate(nums):
    """Find the duplicated value via Floyd's tortoise-and-hare.

    :type nums: List[int]
    :rtype: int

    Values are treated as next-pointers (assumes values index into nums),
    so the duplicate is the entry point of the cycle.
    """
    tortoise = nums[0]
    hare = nums[0]
    # Phase 1: advance at different speeds until they meet inside the cycle.
    while True:
        hare = nums[nums[hare]]
        tortoise = nums[tortoise]
        if hare == tortoise:
            break
    # Phase 2: restart one pointer; the meeting point is the cycle entry.
    hare = nums[0]
    while hare != tortoise:
        hare = nums[hare]
        tortoise = nums[tortoise]
    return tortoise
|
def add_plaintext_fields(record):
    """Add a ``plain_<key>`` companion for each field of the record.

    The companion holds the plaintext value stripped of braces. See
    https://github.com/sciunto-org/python-bibtexparser/issues/116.

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record (mutated in place and returned).
    """
    def _unbrace(text):
        return text.replace('{', '').replace('}', '')

    for key in list(record.keys()):
        value = record[key]
        plain_key = "plain_{}".format(key)
        if isinstance(value, str):
            record[plain_key] = _unbrace(value)
        elif isinstance(value, dict):
            record[plain_key] = {
                subkey: _unbrace(subvalue)
                for subkey, subvalue in value.items()
            }
        elif isinstance(value, list):
            record[plain_key] = [_unbrace(element) for element in value]
        else:
            # Non-text values are copied across unchanged.
            record[plain_key] = value
    return record
|
def swap(to_be_swapped, sample):
    """Identify the swap roles between two two-element collections.

    E.g. to_be_swapped=[2,3], sample=[1,3] returns (1, 3, 2): the element
    unique to the sample (central), the shared element (original), and the
    element unique to to_be_swapped (new partner).
    """
    swap_set = set(to_be_swapped)
    sample_set = set(sample)
    original = (sample_set & swap_set).pop()
    central = (sample_set - swap_set).pop()
    new_partner = (swap_set - sample_set).pop()
    return central, original, new_partner
|
def checkedges(edge_list, e):
    """Check whether any predicted edge matches the given edge(s).

    Attributes:
        edge_list (list): List of predicted edges (pairs).
        e: a single edge tuple, or a list of edge pairs.

    Returns:
        bool: True if some predicted edge matches (endpoint-for-endpoint).
    """
    # Normalise to a list of target edges; a bare tuple is a single edge.
    targets = [e] if type(e) is tuple else e
    for predicted in edge_list:
        for target in targets:
            if predicted[0] == target[0] and predicted[1] == target[1]:
                return True
    return False
|
def _ambisonics_channel_count_from_order(order, three_dim=True):
"""
Helper function that computes the number of channels for a given ambisonics order.
"""
return (order + 1)**2 if three_dim else (2 * order + 1)
|
def make_list_response(reponse_list, cursor=None, more=False, total_count=None):
    """Build a response dict with an item list plus pagination metadata.

    Args:
        reponse_list (list): items to include in the response
        cursor (Cursor, optional): ndb query cursor; its urlsafe form is
            exposed as ``next_cursor`` ('' when absent)
        more (bool, optional): whether more items exist for pagination
        total_count (int, optional): total number of items

    Returns:
        dict: response ready to be serialized and sent to the client
    """
    meta = {
        'next_cursor': cursor.urlsafe() if cursor else '',
        'more': more,
        'total_count': total_count,
    }
    return {'list': reponse_list, 'meta': meta}
|
def truncate_day(tval):
    """Truncate an epoch timestamp ``tval`` to 0Z (midnight UTC).

    Fix: the original used ``/`` which is true division on Python 3, so
    nothing was truncated; floor division restores the intended rounding
    down to a whole number of days.
    """
    return (int(tval) // 86400) * 86400
|
def traverse(d, path):
    """Return the value at a dotted ``path`` inside nested dicts/lists.

    Digit-only segments are treated as list indices, e.g. 'a.0.b'.
    """
    node = d
    for segment in path.split('.'):
        key = int(segment) if segment.isdigit() else segment
        node = node[key]
    return node
|
def process_field(form, field):
    """Build a fuzzing Field (or summary string) for one HTML form input.

    :param form: parsed form element (BeautifulSoup-style: ``find_all``)
    :param field: one input element exposing ``.get('name'/'type'/'value')``
    :returns: a ``Field`` for fuzzable inputs, a ``"<type> <name>"`` summary
        string for named non-fuzzable inputs, or None.
    """
    try:
        field_name = field.get("name")
        field_type = field.get("type")
        field_value = field.get("value")
        # Input types that carry no user-editable text to fuzz.
        types = ["hidden", "submit", "button", "file", "reset"]
        # NOTE(review): the Field-building branches below are only reached
        # when field_name is None — every named field short-circuits here,
        # returning a summary string or None. Confirm this is intentional.
        if field_name is not None:
            if field_type is not None and field_type not in types:
                if ":" in field_name:
                    # Keep only the last ':'-separated component of the name.
                    name = field_name.split(":")
                    field_name = name[-1]
                return field_type + " " + field_name
            return
        if field_type == "hidden":
            # Hidden inputs keep their declared value unchanged.
            return Field(field_name, field_type, field_value)
        elif field_type == "text":
            try:
                # Never auto-fill captcha responses.
                if field_name != "j_captcha_response":
                    fuzz_pattern = ""
                    name = field_name.split(":")
                    final_name = name[len(name) - 1]
                    # Site-specific canned values for known field names.
                    if final_name == "studentNumber":
                        fuzz_pattern = "19"
                    elif final_name == "documentIdNumber":
                        fuzz_pattern = "0123456789"
                    elif final_name == "email":
                        fuzz_pattern = "mail@ist.utl.pt"
                    return Field(field_name, field_type, fuzz_pattern)
            except AttributeError:
                # Some input attributes are blank or aren't of type
                # 'string', which can't be coerced; so, we just ignore
                # the errors.
                pass
        elif field_type == "radio":
            # Pick one radio option from the whole form at random.
            radio_options = form.find_all("input", {"type": "radio"})
            selected = radio_options[random.randrange(len(radio_options))]
            return Field(selected.get("name"), field_type, selected.get("value"))
        elif field_type == "checkbox":
            # Pick one checkbox at random; default value is "on" when unset.
            checkboxes = form.find_all("input", {"type": "checkbox"})
            selected = checkboxes[random.randrange(len(checkboxes))]
            if selected.has_attr("value"):
                return Field(selected.get("name"), field_type, selected.get("value"))
            else:
                return Field(selected.get("name"), field_type, "on")
        elif field_type == "date":
            pass
        elif field_type == "email":
            return Field(field_name, field_type, "example@example.com")
        elif field_type == "search":
            pass
    except AttributeError:
        # Some input attributes are blank or aren't of type 'string', which
        # can't be coerced; so, we just ignore the errors.
        pass
|
def slot_presence(tree, context):
    """Return 1 for all DA slots present in the given context.

    @rtype: dict
    @return: dictionary keyed by DA slot with every value equal to 1
        (DAIs whose slot is None are skipped)
    """
    return {dai.slot: 1 for dai in context['da'] if dai.slot is not None}
|
def capitalize_with_upper(string: str) -> str:
    """
    Capitalize a string by applying ``upper()`` to its first character.

    Fix: the original raised IndexError on an empty string; that case now
    returns the string unchanged.

    :param string: an input string
    :return: the capitalized string ('' stays '')
    """
    if not string:
        return string
    return string[0].upper() + string[1:]
|
def combinations_exact(n, k):
    """Calculate combinations C(n, k) by exact integer division.

    Preferred method for small combinations, but slow on larger ones.
    Note: no error checking (expects to be called through combinations()).

    Fix: ``/=`` is float division on Python 3, silently degrading the
    "exact" result to a float (and losing precision for large n); floor
    division keeps it exact. Each step divides evenly because the running
    product of m consecutive integers is always divisible by m!.
    """
    # combinations(n, k) == combinations(n, n-k); compute the variant with
    # fewer multiplication terms.
    if k > (n - k):
        larger = k
        smaller = n - k
    else:
        larger = n - k
        smaller = k
    product = 1
    # n! / (n - larger)!  by multiplying terms from (n - larger + 1) to n.
    for i in range(larger + 1, n + 1):
        product *= i
    # Divide by smaller! term by term; every division is exact (see above).
    for i in range(2, smaller + 1):  # no need to divide by 1...
        product //= i
    return product
|
def calculate_simpson_index(set1, set2):
    """Simpson index of two sets: |A ∩ B| / min(|A|, |B|)."""
    overlap = float(len(set1.intersection(set2)))
    smaller_size = min(float(len(set1)), float(len(set2)))
    return overlap / smaller_size
|
def getdiff(lst1, lst2):
    """Sum of squared differences over the first 26 positions.

    This is the squared Euclidean distance between two 26-dimensional
    letter-count vectors.
    """
    return sum(
        (lst1[i] - lst2[i]) * (lst1[i] - lst2[i])
        for i in range(26)
    )
|
def inputfiles(otfile, instfile, toofile, prfile):
    """Get input file info as a list of strings.

    Parameters
    ----------
    otfile : str
        observation catalog file name
    instfile : str
        instrument calendar file name
    toofile : str or None
        target of opportunity file name (skipped when None)
    prfile : str
        program status file name

    Returns
    -------
    list of str
        lines of program parameters text (leading blank line included)
    """
    lines = [
        '',
        f'Observation information retrieved from {otfile}',
        f'Program status information retrieved from {prfile}',
        f'Instrument calendar retrieved from {instfile}',
    ]
    if toofile is not None:
        lines.append(f'Target of Opportunity information retrieved from {toofile}')
    return lines
|
def get_ticket_detail_hr(tickets):
    """Build human-readable ticket rows for the host/app detail commands.

    When exactly one record is found, an empty dict is appended so the
    table renders horizontally.

    :param tickets: ticket details from the response.
    :return: list of dicts holding the required fields per ticket.
    """
    rows = []
    for ticket in tickets:
        rows.append({
            'Ticket Number': ticket.get('ticketNumber', ''),
            'Ticket Status': ticket.get('ticketStatus', ''),
            'Deep Link': ticket.get('deepLink', ''),
            'Type': ticket.get('type', ''),
            'Connector Name': ticket.get('connectorName', ''),
            'Detailed Status': ticket.get('detailedStatus', ''),
        })
    if len(rows) == 1:
        rows.append({})
    return rows
|
def ensure_support_staging_jobs_have_correct_keys(
    support_and_staging_matrix_jobs: list, prod_hub_matrix_jobs: list
) -> list:
    """Ensure every support/staging job carries the upgrade_staging and
    reason_for_staging_redeploy keys, even when set to false/empty.

    Args:
        support_and_staging_matrix_jobs (list[dict]): jobs upgrading the
            support chart and staging hub on clusters that require it.
        prod_hub_matrix_jobs (list[dict]): jobs upgrading production hubs
            that require it.

    Returns:
        list[dict]: support_and_staging_matrix_jobs with both keys present
        on every entry.
    """
    for job in support_and_staging_matrix_jobs:
        if "upgrade_staging" in job.keys():
            continue
        # Prod hubs on the same cluster force a staging upgrade first.
        prod_hubs_on_cluster = [
            hub["hub_name"]
            for hub in prod_hub_matrix_jobs
            if hub["cluster_name"] == job["cluster_name"]
        ]
        if prod_hubs_on_cluster:
            job["upgrade_staging"] = "true"
            job["reason_for_staging_redeploy"] = (
                "Following prod hubs require redeploy: "
                + ", ".join(prod_hubs_on_cluster)
            )
        else:
            job["upgrade_staging"] = "false"
            job["reason_for_staging_redeploy"] = ""
    return support_and_staging_matrix_jobs
|
def weighted_avg_forecast(source: list, window_size: int, num: int, weight: float=1.0):
    """
    Forecast new values from a list of existing values using a weighted average delta formula.
    Noticeably faster than linear regression while producing similar results.
    Arguments:
        source: List of data to forecast from (integer or floating point).
        window_size: The number of elements to use from the end of 'source' for prediction.
        num: The number of elements to forecast.
        weight: Amount of weight to give to the elements closest to the predicted data. Values less than 1.0 will
                give the most weight to the farthest elements. Values exceeding 2.0 or -2.0 will increasingly
                give no weight to the farthest and nearest elements, respectively.
    Returns:
        list(float): A list of the additional predicted data.
    """
    assert num <= window_size, "Cannot forecast more elements than window size."
    assert len(source) >= window_size, ("Source data length {} is less than window size {}."
                                        .format(len(source), window_size))
    result = []
    # Per-step decay of the weight; algebraically 2*(weight - 1)/window_size.
    weight_step = (weight - (1.0 + (1.0 - weight))) / window_size
    for index in range(num):
        delta_avg = 0.0
        # Include any previously forecasted data.
        for window_index in range(index):
            if window_index == 0:
                # Boundary pair: last real sample -> first forecast.
                # NOTE(review): weighting here uses window_index directly,
                # while the source loop offsets by `index` — confirm the
                # intended weight alignment between the two loops.
                sample_2 = source[-1]
                sample_1 = result[0]
            else:
                sample_2 = result[-(window_index + 1)]
                sample_1 = result[-(window_index)]
            sample_delta = sample_1 - sample_2
            weight_amount = (weight - (weight_step * window_index))
            # Weights never go negative; clamp at zero.
            if (weight_amount) < 0: weight_amount = 0
            delta_avg += sample_delta * weight_amount
        # Include remaining source data (consecutive deltas, newest first).
        for window_index in range(window_size - index):
            sample_delta = (source[-(window_index + 1)] - source[-(window_index + 2)])
            weight_amount = (weight - (weight_step * (window_index + index)))
            if (weight_amount) < 0: weight_amount = 0
            delta_avg += sample_delta * weight_amount
        # Extend from the most recent known value by the averaged delta.
        last_sample = source[-1] if index == 0 else result[-1]
        delta_avg /= window_size
        result.append(last_sample + delta_avg)
    return result
|
def reuss_avg(M1, M2, f1, f2):
    """
    Reuss average for a 2-mineral mixture.

    Usage:
        M = reuss_avg(M1, M2, f1, f2)
    Inputs:
        M1 & M2 = Elastic moduli for each phase (float)
        f1 & f2 = Volume fraction of each phase (float)
    Output:
        M = Reuss average of elastic moduli (float)
    """
    compliance = f1 / M1 + f2 / M2
    return compliance ** -1
|
def to_nested_tuples(item):
    """Convert lists (and nested lists) into tuples, recursively.

    The returned value is hashable when its leaves are; non-list items are
    returned unchanged.
    """
    if not isinstance(item, list):
        return item
    return tuple(to_nested_tuples(element) for element in item)
|
def clean(text):
    """Sanitize text for use as a folder name: non-alphanumerics become '_'."""
    safe_chars = [char if char.isalnum() else "_" for char in text]
    return "".join(safe_chars)
|
def logistic(x, params):
    """Evaluate the logistic curve equation using a log-transformed x_0.

    Parameters
    ----------
    x
        X-value at which to evaluate the logistic function.
    params : list
        * einf: maximum Y-value (effect)
        * log10_mid: log10-transformed X-value of midpoint
        * slope: steepness of curve

    Returns
    -------
    float
        Y-value of the logistic function.
    """
    max_effect, log10_midpoint, steepness = params
    baseline = 1.0  # emin
    midpoint = 10 ** log10_midpoint
    scaled = (x / midpoint) ** steepness
    return ((baseline - max_effect) / (1 + scaled)) + max_effect
|
def _always_together(nums, arr):
"""Check if nums always appear together in elements of arr."""
return all(set(nums).issubset(set(el)) or set(nums).isdisjoint(set(el))
for el in arr)
|
def alfa_choice(x, dx, eps=1e-2, alfa=0.1, method='decm'):
    """Choose a damping factor for an iterative update step.

    For method 'decm', candidate factors ``(eps - 1) * x / dx`` are computed
    element-wise (assumes x and dx are numpy arrays -- TODO confirm against
    callers) and ``alfa`` is shrunk to the smallest non-negative candidate.

    :param x numpy.ndarray: current iterate values
    :param dx numpy.ndarray: proposed update step
    :param eps float: tolerance controlling the candidate factors
    :param alfa float: initial damping factor
    :param method str: only 'decm' triggers the adjustment; any other value
        returns ``alfa`` unchanged
    :return: the chosen damping factor
    :rtype: float
    """
    if method == 'decm':
        alfa0 = (eps-1)*x/dx
        for a in alfa0:
            if a>=0:
                alfa = min(alfa, a)
            else:
                # NOTE(review): a negative candidate resets alfa to 1 and
                # discards any smaller value accumulated so far -- confirm
                # this is intentional and not a misplaced else branch.
                alfa = 1
    return alfa
|
def detect_override(stamp1, stamp2):
    """Determine if two stamps differ in a way that means manual override.

    When two stamps differ at all, that means the source is dirty and so we
    need to rebuild.  If they differ in mtime or size, then someone has surely
    edited the file, and we don't want to trample their changes.

    But if the only difference is something else (like ownership, st_mode,
    etc) then that might be a false positive; it's annoying to mark as
    overridden in that case, so we return False.  (It's still dirty though!)
    """
    if stamp1 == stamp2:
        return False
    # Compare only the first two '-'-separated fields of each stamp.
    return stamp1.split('-', 2)[:2] != stamp2.split('-', 2)[:2]
|
def is_palindrome(x):
    """Check whether the decimal representation of *x* is a palindrome.

    :param x: integer
    :return: True if x is a palindrome, False otherwise
    """
    digits = str(x)
    return digits == digits[::-1]
|
def getargs(command):
    """Split a command string into its lowercase command word and argument list."""
    head, *rest = command.split(' ')
    return head.lower(), rest
|
def colspan(text, cs=2):
    """Format *text* as a colspan cell in wikitext table markup.

    When *text* already contains a '|' past its first character, the
    existing cell attributes (the part after the first '|') are kept and
    the colspan attribute is appended to them.
    """
    cell = str(text)
    if '|' in cell[1:]:
        pieces = cell.split('|')
        return f"|{pieces[1]}colspan='{cs}' | {pieces[-1]}"
    return f"| colspan='{cs}' | {cell}\n"
|
def m2f(note):
    """Convert a MIDI note number to its frequency in Hertz (A4 = note 69 = 440 Hz).

    See https://en.wikipedia.org/wiki/MIDI_Tuning_Standard.
    """
    semitones_from_a4 = note - 69
    return 440 * 2 ** (semitones_from_a4 / 12)
|
def image_without_seam(im, s):
    """
    Given a (color) image and a list of indices to be removed from the image,
    return a new image (without modifying the original) that contains all the
    pixels from the original image except those corresponding to the locations
    in the given list.
    """
    # Build a fresh pixel list instead of deleting from the original in
    # place: the previous implementation aliased im['pixels'] (mutating the
    # input, contradicting the docstring) and its sequential `del` calls
    # shifted the positions of later indices in `s`.
    removed = set(s)
    return {
        'height': im['height'],
        'width': im['width'] - 1,
        'pixels': [p for i, p in enumerate(im['pixels']) if i not in removed],
    }
|
def flattendict(nesteddict, sep=None, _prefix=None):
    """
    Flatten nested dictionary

    **Example**::

        >>> sc.flattendict({'a':{'b':1,'c':{'d':2,'e':3}}})
        {('a', 'b'): 1, ('a', 'c', 'd'): 2, ('a', 'c', 'e'): 3}
        >>> sc.flattendict({'a':{'b':1,'c':{'d':2,'e':3}}}, sep='_')
        {'a_b': 1, 'a_c_d': 2, 'a_c_e': 3}

    Args:
        nesteddict: Input dictionary potentially containing dicts as values
        sep: Concatenate keys using string separator. If ``None`` the returned dictionary will have tuples as keys
        _prefix: Internal argument for recursively accumulating the nested keys

    Returns:
        A flat dictionary where no values are dicts
    """
    flat = {}
    for key, value in nesteddict.items():
        # Extend the accumulated key path, as a tuple or a joined string.
        if sep is None:
            full_key = (key,) if _prefix is None else _prefix + (key,)
        else:
            full_key = key if _prefix is None else _prefix + sep + key
        if isinstance(value, dict):
            flat.update(flattendict(value, sep=sep, _prefix=full_key))
        else:
            flat[full_key] = value
    return flat
|
def just_load_sets(load_list):
    """Strip the extra metadata, keeping only the load_set (index 2) of each entry."""
    return list(map(lambda entry: entry[2], load_list))
|
def drop_repeats_with(f, xs):
    """Return a new list without any consecutively repeating elements.

    Equality of neighbours is decided by applying the supplied binary
    predicate *f* to each pair of consecutive elements; the first element
    in a series of "equal" elements is preserved.

    Args:
        f: predicate taking two consecutive elements; truthy means equal.
        xs: input sequence.

    Returns:
        list: xs with consecutive duplicates (per *f*) removed.  An empty
        input yields an empty list (the previous version raised IndexError).
    """
    if not xs:
        return []
    out = [xs[0]]
    for prev, cur in zip(xs, xs[1:]):
        if not f(prev, cur):
            out.append(cur)
    return out
|
def compress(string):
    """Run-length encode a string (each character followed by its repeat count).

    Args:
        string: any string to be compressed.

    Returns:
        A compressed version of the input based on repeat occurrences.

    Raises:
        ValueError: string input is empty.
    """
    if not string:
        raise ValueError('empty string input given')
    pieces = []
    run_char = string[0]
    run_length = 1
    for ch in string[1:]:
        if ch == run_char:
            run_length += 1
        else:
            # Run ended: flush it and start a new one.
            pieces.append(run_char + str(run_length))
            run_char = ch
            run_length = 1
    pieces.append(run_char + str(run_length))
    return ''.join(pieces)
|
def _getBetweenComments(lines, comments):
    """
    find comments between to lines
    example:
    - module-name: "arcade"
    # comment
    data-files:

    Scans *lines* for '#' comment lines, records each one in *comments*
    keyed by the enclosing module name, and deletes the comment line from
    *lines*.  Both arguments are mutated in place and also returned.
    """
    # NOTE(review): `lines` is deleted from while being enumerated, so a
    # comment line directly following a removed one may be skipped on this
    # pass -- confirm whether callers rely on repeated invocation.
    for i, line in enumerate(lines):
        if line.strip().startswith("#"):
            comment = line
            line2 = line
            counter = 0
            # Walk backwards to the nearest non-empty, non-comment line;
            # its stripped text becomes the key the comment is attached to.
            while True:
                counter += 1
                line2 = lines[i - counter]
                if line2 != "" and not line2.strip().startswith("#"):
                    key = line2.strip()
                    break
            counter = 0
            # Walk backwards again to locate the enclosing module header.
            # NOTE(review): presumably every comment is preceded by a
            # '- module-name: ' line; otherwise this loop wraps around or
            # raises IndexError -- verify against the input format.
            while True:
                counter += 1
                line2 = lines[i - counter]
                if line2.startswith("- module-name: "):
                    module_name = line2.replace("- module-name: ", "")
                    break
            del lines[i]
            data = {"key": key, "comment": comment, "type": "between"}
            if module_name in comments:
                comments[module_name].append(data)
            else:
                comments[module_name] = [data]
    return lines, comments
|
def interface_lookup(interfaces, hwaddr, address_type):
    """Search the interface list for the address of the given type.

    Returns the 'addr' field of the first matching address on an interface
    whose 'hwaddr' equals *hwaddr*, or None when nothing matches.
    """
    matching = (i for i in interfaces.values() if i.get('hwaddr') == hwaddr)
    for interface in matching:
        for addr_entry in interface.get('addrs'):
            if addr_entry.get('type') == address_type:
                return addr_entry.get('addr')
    return None
|
def combine_reports(original, new):
    """Merge two gcov reports for one file by summing per-line hit counts.

    A None hit count on either side falls back to the other side's value.
    When *original* is None, *new* is returned unchanged.
    """
    if original is None:
        return new
    merged = []
    for orig_hits, new_hits in zip(original['coverage'], new['coverage']):
        if orig_hits is None:
            merged.append(new_hits)
        elif new_hits is None:
            merged.append(orig_hits)
        else:
            merged.append(orig_hits + new_hits)
    return {
        'name': original['name'],
        'source_digest': original['source_digest'],
        'coverage': merged,
    }
|
def to_dict(value):
    """Recursively transform a class from `config.models` to a dict.

    Objects exposing an ``as_dict`` method are converted via that method;
    lists and dicts are converted element-wise; everything else is returned
    unchanged.
    """
    if hasattr(value, 'as_dict'):
        return value.as_dict()
    if isinstance(value, list):
        converted_list = []
        for item in value:
            converted_list.append(to_dict(item))
        return converted_list
    if isinstance(value, dict):
        converted_dict = {}
        for key, item in value.items():
            converted_dict[key] = to_dict(item)
        return converted_dict
    return value
|
def find_common_elements(list1: list, list2: list) -> list:
    """This function takes as input two lists and returns a list with the common elements

    Args:
        list1 (list): first list
        list2 (list): second list

    Returns:
        intersection_as_list (list): list containing the common elements between the two input lists
        (deduplicated; order is unspecified because a set is used internally)
    """
    return list(set(list1) & set(list2))
|
def arff_to_csv(arff_file, output_csv_file=None):
    """
    Convert an arff file to a row.

    Column headers include lines that start with '@attribute ',
    include 'numeric', and whose intervening string is not exception_string.
    The function raises an error if the number of resulting columns does
    not equal the number of numeric values.

    Example input: arff output from openSMILE's SMILExtract command

    Adapted some formatting from:
    http://biggyani.blogspot.com/2014/08/
    converting-back-and-forth-between-weka.html

    Parameters
    ----------
    arff_file : string
        arff file (full path)
    output_csv_file : string or None
        output table file (full path)

    Returns
    -------
    row_data : Pandas Series
        output table data (None on any parsing failure)
    output_csv_file : string or None
        output table file (full path; None on any parsing failure)

    Examples
    --------
    >>> from mhealthx.xtra import arff_to_csv
    >>> arff_file = '/Users/arno/csv/test1.csv'
    >>> output_csv_file = None #'test.csv'
    >>> row_data, output_csv_file = arff_to_csv(arff_file, output_csv_file)
    """
    import pandas as pd

    if arff_file is None:
        return None, None

    try:
        # String not included as a header:
        exception_string = 'class'
        # Remove items from the left and right in the '@data' row:
        data_from_left = 1
        data_from_right = 1

        with open(arff_file, 'r') as fid:
            lines = fid.readlines()

        # Collect column names from the contiguous run of
        # '@attribute ... numeric' lines preceding the '@data' section:
        columns = []
        first_numeric = False
        for line in lines:
            if '@data' in line:
                break
            if line.startswith('@attribute ') and 'numeric' in line:
                if '{' in line:
                    interstring = line[11:line.index('{') - 1]
                else:
                    interstring = line[11:line.index('numeric') - 1]
                # Skip the class attribute; everything else is a header:
                if interstring != exception_string:
                    columns.append(interstring)
                    if not first_numeric:
                        first_numeric = True
            elif first_numeric:
                # Past the first run of numeric attributes:
                break

        # The last line holds the data row; drop the first and last items:
        data = lines[-1].split(',')
        data = data[data_from_left:-data_from_right]
        if len(data) != len(columns):
            raise Warning("arff file doesn't conform to expected format.")

        row_data = pd.Series(data, index=columns)

        # Save output_csv_file:
        if output_csv_file:
            row_data.to_csv(output_csv_file)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any parsing/IO failure yields (None, None).
        row_data = None
        output_csv_file = None

    return row_data, output_csv_file
|
def overlap_validator(value):
    """Validate a tile-overlap setting.

    Supported tile overlap values: 32, 64.

    Args:
        value: string holding the requested overlap.

    Returns:
        int: the validated overlap (32 or 64).

    Raises:
        TypeError: if *value* is not numeric or not a supported overlap.
    """
    # The original tested `value.isnumeric` without calling it: a bound
    # method is always truthy, so the non-numeric branch was unreachable
    # (and the `return` after `raise` was dead code).
    if not value.isnumeric():
        raise TypeError("Select a valid value.")
    if int(value) in (32, 64):
        return int(value)
    raise TypeError("Select a valid value.")
|
def get_user_age_from_userid(users, userid):
    """Look up a user record by id and return its 'Age' field.

    Builds a dictionary keyed by id to find the right user record.

    :param users: iterable of user dicts, each carrying an 'id' key
    :param userid: id of the user to look up
    :return: the 'Age' value of the matching record
    """
    records_by_id = {record['id']: record for record in users}
    return records_by_id[userid]['Age']
|
def spoonerism(words):
    """Convert a list of words formatted with the spoonerism technique.

    Each consecutive pair has its first letters swapped, e.g.
    [f]oo [b]ar => 'boo far'.

    :param words (list) - The list of words to operate on
    :rtype words (list) - The updated list of words

    >>> spoonerism(['foo', 'bar'])
    >>> ['boo far']
    """
    if len(words) < 2:
        raise ValueError('Need more than one word to combine')
    swapped = []
    for first, second in zip(words, words[1:]):
        try:
            swapped.append('{}{} {}{}'.format(
                second[0],   # 2nd word, 1st letter
                first[1:],   # 1st word, 2nd letter to end
                first[0],    # 1st word, 1st letter
                second[1:])) # 2nd word, 2nd letter to end
        except IndexError:
            # An empty word has no first letter; skip the pair, matching
            # the original behaviour.
            continue
    return swapped
|
def RPL_TRACECLASS(sender, receipient, message):
    """ Reply Code 209 """
    # `receipient` is accepted for a uniform reply-handler signature but is
    # not used in the formatted output.
    return "<{}>: {}".format(sender, message)
|
def compute_deriv(poly):
    """
    Computes and returns the derivative of a polynomial function. If the
    derivative is 0 (constant or empty polynomial), returns (0.0,).

    Example:
    >>> poly = (-13.39, 0.0, 17.5, 3.0, 1.0) # x^4 + 3x^3 + 17.5x^2 - 13.39
    >>> print compute_deriv(poly)  # 4x^3 + 9x^2 + 35^x
    (0.0, 35.0, 9.0, 4.0)

    poly: tuple of numbers, length > 0; poly[i] is the coefficient of x^i
    returns: tuple of numbers
    """
    # d/dx of c*x^i is (i*c)*x^(i-1): multiply each coefficient by its
    # exponent, then drop the constant term (index 0).
    deriv = tuple(exponent * coeff for exponent, coeff in enumerate(poly))[1:]
    # The original returned () for constant polynomials, contradicting its
    # documented contract of returning (0.0,) for a zero derivative.
    return deriv if deriv else (0.0,)
|
def is_mangled_name_attempt(attribute):
    """Check if attribute attempts to conform to name mangling convention.

    True for names that start with exactly one underscore and do not end
    with an underscore.

    Args:
        attribute: attribute name to inspect.

    Returns:
        bool: whether the name looks like a mangling attempt.
    """
    # Length guard: the original indexed attribute[1] unconditionally and
    # raised IndexError for names shorter than two characters (e.g. '_').
    # endswith('_') also subsumes the redundant endswith('__') check.
    return (
        len(attribute) >= 2
        and attribute[0] == '_'
        and attribute[1] != '_'
        and not attribute.endswith('_')
    )
|
def create_name_keyword(naam: str) -> str:
    """Get a single keyword from the name field.

    Tries, in order of precedence, to split on ',', '~' and ' ', returning
    the part before the first separator that actually occurs; otherwise the
    name is returned unchanged.
    """
    # todo: fix this one: Albrecht (St), van
    for separator in (',', '~', ' '):
        pieces = naam.split(separator)
        if len(pieces) >= 2:
            return pieces[0]
    return naam
|
def _print_quantization_style(sqcc):
"""Only to be used with QCC and QCD segments."""
msg = '\n Quantization style: '
if sqcc & 0x1f == 0:
msg += 'no quantization, '
elif sqcc & 0x1f == 1:
msg += 'scalar implicit, '
elif sqcc & 0x1f == 2:
msg += 'scalar explicit, '
return msg
|
def add_complete(deck, card):
    """
    @input   | list (deck of cards)
    ----------------------------------------------
    @goal    | add 4 of the same card, as specified
             | by the 'card' parameter
    ----------------------------------------------
    @output  | a list (deck of cards), now with
             | 4 additional cards
    """
    # Mutates the deck in place and returns the same list.
    deck.extend([card] * 4)
    return deck
|
def init_grid(size=3):
    """Returns an empty Tic-tac-toe grid with measurements size x size"""
    grid = []
    for _ in range(size):
        # Each row is a fresh list so rows can be mutated independently.
        grid.append([' '] * size)
    return grid
|
def should_update(iteration, epoch, settings):
    """
    Tells whether it is time to update the plateaus or not
    :param iteration: iteration number
    :param epoch: epoch number
    :param settings: settings dictionary
    :return: True if it is time for an update, and False otherwise
    """
    # NOTE(review): the presence check looks at the top-level settings dict
    # but the value is read from settings['update'] -- confirm the intended
    # location of the 'no_update' flag.
    no_update = False if 'no_update' not in settings else settings['update']['no_update']
    if no_update:
        return False
    # Update on the configured start epoch, on every `frequency`-th epoch
    # after it, or on iteration 0 when 'update_first_iteration' is set.
    return epoch == settings['update']['start_epoch'] or \
        (epoch > settings['update']['start_epoch'] and epoch % settings['update']['frequency'] == 0) or \
        (settings['update_first_iteration'] and iteration == 0)
|
def get_indent_size(line: str) -> int:
    """Return the number of leading whitespace characters in *line*.

    Computed as the length difference after lstrip.  Unlike the previous
    `line.find(line.lstrip())`, this also reports the correct indent for
    whitespace-only lines (find('') is always 0).
    """
    return len(line) - len(line.lstrip())
|
def dot(a, b):
    """
    Computes the dot product of the 2-D vectors a and b
    """
    ax, ay = a[0], a[1]
    bx, by = b[0], b[1]
    return ax * bx + ay * by
|
def get_last_ter_line(model_lines):
    """Gets the index of the last TER record in a list of records. 0 will be
    returned if there are none.

    :param list model_lines: the lines to search.
    :rtype: ``int``"""
    # Scan from the end so the first hit is the last TER record.
    for position in range(len(model_lines) - 1, -1, -1):
        if model_lines[position][:3] == "TER":
            return position
    return 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.