content stringlengths 42 6.51k |
|---|
def SummarizeResources(res_dict):
    """Group resource names by their resource type.

    :param res_dict: iterable of dicts, each with 'type' and 'name' keys
    :return: dict mapping resource type -> list of names, in input order
    """
    grouped = {}
    for entry in res_dict:
        if entry['type'] not in grouped:
            grouped[entry['type']] = []
        grouped[entry['type']].append(entry['name'])
    return grouped
def reverseWords(string):
    """Return *string* with its whitespace-separated words in reverse order.

    (reverse_words == PEP8; the mixedCase name is forced by CodeWars.)
    """
    words = string.split()
    words.reverse()
    return ' '.join(words)
def firehose_data_bucket(config):
    """Get the bucket name used for historical data retention.

    Args:
        config (dict): The loaded config from the 'conf/' directory
    Returns:
        string|bool: bucket name for historical data retention, or False
        when firehose is not configured/enabled.
    """
    firehose_config = config['global']['infrastructure'].get('firehose')
    if not (firehose_config and firehose_config.get('enabled')):
        return False
    # default name is <prefix>-streamalert-data, overridable via bucket_name
    default_name = '{}-streamalert-data'.format(config['global']['account']['prefix'])
    return firehose_config.get('bucket_name', default_name)
def _escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("&", "&")
data = data.replace("<", "<")
data = data.replace(">", ">")
for chars, entity in list(entities.items()):
data = data.replace(chars, entity)
return data |
def escape_chars(text, chars):
    """Backslash-escape every character of *chars* found in *text*.

    The backslash itself, when present in *chars*, is escaped first so the
    backslashes inserted by later replacements are not doubled.
    """
    text = str(text)
    # sort is stable: backslash (key False) is moved to the front
    ordered = sorted(set(chars), key=lambda c: c != '\\')
    for ch in ordered:
        text = text.replace(ch, '\\' + ch)
    return text
def ensureTuple(value):
    """Wrap *value* in a 1-tuple unless it is already a tuple."""
    return value if isinstance(value, tuple) else (value,)
def str2bool(val):
    """Return True when *val* spells an affirmative value (yes/true/t/1)."""
    truthy = {"yes", "true", "t", "1"}
    return val.lower() in truthy
def bindata(n, lst, v=1):
"""
Bins a list of values into bins of size *n*.
**Parameters**
n: *int*
Number of values to bin together. e.g. ``n = 4`` would bin the first four values into a single value, then the next 4, etc.
lst: *list*
List of values to bin.
v: *int* or *float*, optional
Bin scalar. The calculated bin values will be divided by this value. e.g. if ``n = v`` the output values will be an average of each bin.
**Returns**
binned list: *list*
A list of binned values.
**Notes**
- If the list is not divisible by `n`, the final bin will not be included in the output list. (The last values will be discarded.)
"""
out = []
delta = 0
ttemp = 0
for ind, val in enumerate(lst):
delta += 1
ttemp += val # add current value to growing sum
if delta == n: # critical number is reached
out.append(ttemp / float(v)) # append sum to list
delta = 0 # reset critical count and sum
ttemp = 0
return out |
def new_xform(val, reverse=False):
    """Mock transformation used in place of existing xforms in KEY_TRANSFORMS.

    :param val: any value; ignored
    :param reverse: placeholder mirroring real conversion methods
    :return: 1 when reverse is truthy, else the tuple (1, 1)
    """
    return 1 if reverse else (1, 1)
def generate_gt_string(tokens):
    """
    Given a list of GT labels corresponding to a single event, convert them
    to a string formatted according to the Twitter-Event-Data-2019 GT
    format: each label becomes "[e1,e2,...]" and duplicate groups are
    separated by "|".

    parameters
    -----------
    :param tokens: list of duplicate groups, each a list of labels, each
        label an iterable of string elements
    :return: str
    """
    # Build pieces in a list and join once: avoids quadratic concatenation
    # and the original's shadowing of the builtin `str`.
    parts = []
    for duplicate in tokens:
        # a '|' separates this group from a previously emitted one
        if parts and parts[-1][-1:] == "]":
            parts.append("|")
        for label in duplicate:
            chunk = "["
            for element in label:
                # replicate original comma placement exactly (no comma
                # directly after the opening bracket)
                chunk = chunk + element if chunk[-1] == "[" else chunk + "," + element
            parts.append(chunk + "]")
    return "".join(parts)
def _getLocationWords(location, words_index):
"""
Retrieves the words found at the passage location
:param location: The passage location e.g. book/chapter/verse without z-padding
:param words_index:
:return: a list of words
"""
if location in words_index:
return words_index[location]
else:
return [] |
def _moder_input(nin, nout, **kwargs):
"""
Write moder input.
Parameters
----------
nin : `int`
tape number for input file
nout : `int`
tape number for output file
Returns
-------
`str`
moder input text
"""
text = ["moder"]
text += [f"{nin:d} {nout:d} /"]
return "\n".join(text) + "\n" |
def get_col_idx(i):
    """Return a plotting color code for index *i* from a fixed palette."""
    palette = 'rkgbmckrgbmckrkgbmckrgbmckrkgbmckrgbmck'
    return palette[i]
def is_distributed_table(v):
    """Determine if an object is a DistributedTable (possibly restored).

    The check is by class name rather than isinstance, so it works without
    importing the defining module.
    """
    return type(v).__name__ in ("DistributedTable", "RestoredDistributedTable")
def aggregate(state, q, aggs, facets):
    """Generate aggregations — a generalized way to do facetting.

    :param state: carries ``facet_size``, the max number of buckets per facet
    :param q: unused; kept for interface compatibility
    :param aggs: aggregation dict, extended in place
    :param facets: field names to facet on
    :return: the updated ``aggs`` dict
    """
    for field in facets:
        aggs[field] = {'terms': {'field': field, 'size': state.facet_size}}
    return aggs
def transform_chi_eff_chi_a_s1zs2z(mass1, mass2, chi_eff, chi_a):
    """Recover the component spins (spin1z, spin2z) from chi_eff and chi_a."""
    ratio1 = (mass1 + mass2) / (2.0 * mass1)
    ratio2 = (mass1 + mass2) / (2.0 * mass2)
    spin1z = ratio1 * (chi_eff - chi_a)
    spin2z = ratio2 * (chi_eff + chi_a)
    return spin1z, spin2z
def to_marker_edge(marker_size, marker):
    """
    Get the edge shrink target for a marker.

    :param marker_size: int
    :param marker: marker symbol, default 'o'
    :return: float
    """
    # 'large' markers (square / triangles / diamond) need extra space
    effective = 2 * marker_size if marker in "s^>v<d" else marker_size
    return pow(effective, 0.5) / 2
def findMyIndex(syllableCounter, phonemeCounter, seq):
    """
    Map a (syllable, phoneme) position onto a flat 0-based index.

    :param syllableCounter: 1-based syllable position
    :param phonemeCounter: 1-based phoneme position within the syllable
    :param seq: list of syllables (each a sized container of phonemes)
    :return: 0-based flat index
    """
    preceding = sum(len(part) for part in seq[:max(syllableCounter - 1, 0)])
    return preceding + phonemeCounter - 1
def cons_merge(car_part, cdr_part):
    """Prepend *car_part* to *cdr_part*, preserving its list/tuple type."""
    container = type(cdr_part)
    return container([car_part]) + cdr_part
def read_float_with_comma(num):
    """Parse a float string that uses a comma as the decimal separator.

    locale is deliberately avoided: the page being parsed need not be in
    the same locale as the running Python environment.

    Args:
        num (str): the float string to parse
    Returns:
        float: the parsed float
    """
    normalized = num.replace(",", ".")
    return float(normalized)
def normalise_round(val, minimum, maximum):
    """
    Normalise a value between two boundaries and round to the nearest
    integer (Python's banker's rounding: 0.5 rounds to 0).

    :param val: value to normalise
    :param minimum: minimum boundary
    :param maximum: maximum boundary
    :return: integer value
    """
    span = float(maximum - minimum)
    return round((val - minimum) / span)
def repo_name_from_uri(uri):
    """
    Given a valid and complete URI, return just the repo name portion
    (everything before the first ':/').  Does no validation of either the
    URI or the resulting name.

    :param uri: the valid and complete URI
    :return: a repo name
    """
    name, _sep, _rest = uri.partition(":/")
    return name
def gen_state_string(l):
    """Given a list of strings, generate the latex
    '\\vert {} \\rangle' representation of these in superposition.

    Only the first character of each string is rendered inside \\textrm.
    """
    # Join by index rather than comparing each element to l[-1]: the old
    # `if i != l[-1]` test dropped the " + " separator after any element
    # that happened to equal the last one (duplicate entries).  The unused
    # str_template local is also removed.
    terms = [r"b_{{{}}}\vert \textrm{{{}}} \rangle ".format(idx, s[0])
             for idx, s in enumerate(l)]
    return " + ".join(terms)
def reduce_by_multiple(trajectory, integer):
    """
    Keep only control points in the trajectory whose index is a multiple
    of *integer*.  The final point is re-appended when the thinning
    dropped it; an empty reduction returns the trajectory unchanged.

    :param trajectory: list describing the trajectory
    :param integer: the stride used for reducing
    :return: the reduced trajectory
    """
    reduced = trajectory[:len(trajectory) - 1:integer]
    if not reduced:
        return trajectory
    last = trajectory[-1]
    if reduced[-1] != last:
        reduced.append(last)
    return reduced
def category_table_build(osm_table_name, categorysql):
    """
    Return an SQL OSM category-table builder.

    NOTE(review): the statement is assembled with %-interpolation, so both
    arguments must come from trusted code, never from user input.

    Args:
        osm_table_name (string): an OSM PostGIS table name
        categorysql (string): SQL expression producing the category
    Returns:
        sql (string): an INSERT ... SELECT statement
    """
    template = ("INSERT INTO category_%s (osm_id, cat) "
                "SELECT DISTINCT osm_id, "
                "%s as cat "
                "FROM %s as osm")
    return template % (osm_table_name, categorysql, osm_table_name)
def whisper(text):
    """Lower-case *text* and append a trailing ellipsis.

    :param text: input string
    :return: whispered string
    """
    return '{}...'.format(text.lower())
def getFirstPlist(byteString):
    """Get the next plist from a byte string that may contain one or
    more text-style plists.

    Returns a tuple: the first plist (or b"" when none is found) and the
    remaining bytes after it (the whole input when none is found)."""
    header = b'<?xml version'
    footer = b'</plist>'
    start = byteString.find(header)
    if start < 0:
        return (b"", byteString)
    end = byteString.find(footer, start + len(header))
    if end < 0:
        return (b"", byteString)
    end += len(footer)  # include the closing tag in the plist slice
    return (byteString[start:end], byteString[end:])
def list_prod(lst):
    """
    Calculate the product of all numbers in a python list.

    An empty list yields the multiplicative identity, 1.
    """
    result = 1
    for value in lst:
        result = result * value
    return result
def split_model_name(model_name):
    """
    Split a model name of the form "source:name" on the first ':'.

    Args:
        model_name: the original model name
    Returns: (source name, model name); the source is "" when absent.
    """
    source, sep, name = model_name.partition(":")
    if not sep:
        # no colon: the whole string is the model name
        return "", source
    assert source in ("satflow", "hf_hub")
    return source, name
def _has_all_keys(op_dict, all_ops):
"""Check all keys."""
return all([k in op_dict.keys() for k in all_ops]) and len(all_ops) == len(op_dict) |
def concat_commands(commands):
    """Join UNIX commands with the ';' sequencing operator."""
    separator = '; '
    return separator.join(commands)
def translate(text):
    """
    Translate text into pig latin.

    Words starting with a vowel (or the special sounds "xr"/"yt") get
    "ay" appended; otherwise the leading consonant cluster (keeping "qu"
    together) is moved to the end before "ay".  A word containing no
    vowel translates to "" as before; empty segments produced by runs of
    spaces are now skipped instead of raising IndexError.

    :param text: string - text to translate.
    :return: string - translated text.
    """
    vowels = "aeiou"
    vowel_sounds = ("xr", "yt")
    translated_words = []
    for word in text.split(" "):
        translated_word = ""
        if word and (word[0] in vowels or word[:2] in vowel_sounds):
            translated_word = word + "ay"
        elif word:
            for index, char in enumerate(word):
                if index == 0:
                    continue
                if char == "u" and word[index - 1] == "q":
                    # keep "qu" with the consonant cluster
                    translated_word = word[index + 1:] + word[:index + 1] + "ay"
                    break
                if char in vowels or char == "y":
                    translated_word = word[index:] + word[:index] + "ay"
                    break
        translated_words.append(translated_word)
    return " ".join(translated_words).strip()
def only_existing(l, haystack):
    """
    Helper to filter out elements not present in the haystack, in O(n).

    Membership is tested against a set built once from *haystack*.
    """
    present = set(haystack)
    kept = []
    for item in l:
        if item in present:
            kept.append(item)
    return kept
def calculate_token_freq(
        token,
        file_tokens
) -> int:
    """
    Calculate the token frequency in the file corpus.

    :type token: str
    :type file_tokens: list
    :return: number of occurrences of *token* in *file_tokens*
    """
    # list.count performs the same ==-based tally at C speed.
    return file_tokens.count(token)
def parse_helm_list_output(helm_list_output):
    """
    Parse the string output of the 'helm list' command.

    The header row is validated first — column 0 must be 'NAME' and
    column 6 'NAMESPACE' — so that a helm output-format change (e.g. after
    a helm version update) fails loudly rather than mis-parsing.

    :param helm_list_output: 'helm list' output as a string
    :return: list of dicts like
        [{'chart_name': 'some_chart_name', 'name_space': 'some_name_space'}]
    """
    lines = helm_list_output.split("\n")
    header = lines[0].split("\t")
    if header[0].strip() != 'NAME' or header[6].strip() != 'NAMESPACE':
        raise Exception("'helm list' command output changed, probably due to helm version update, "
                        "code change is needed to resolve this issue, "
                        "contact the developer.")
    installed_charts = []
    # every remaining non-empty line is one installed chart, tab-delimited
    for line in lines[1:]:
        columns = line.split("\t")
        if columns[0] != "":
            installed_charts.append({
                'chart_name': columns[0].strip(),
                'name_space': columns[6].strip(),
            })
    return installed_charts
def array_strip_units(data):
    """Strip the units of a quantity-like object exposing ``.magnitude``;
    anything else is returned unchanged."""
    try:
        magnitude = data.magnitude
    except AttributeError:
        return data
    return magnitude
def solution(number):
    """Count positive multiples of 3-only, 5-only and 15 below *number*.

    (Thanks to 'M.Gaidamaka' from CodeWars.)

    :return: [multiples of 3 not 15, multiples of 5 not 15, multiples of 15]
    """
    limit = number - 1
    threes, fives, fifteens = limit // 3, limit // 5, limit // 15
    return [threes - fifteens, fives - fifteens, fifteens]
def mod_by_AES(hex_num):
    """Reduce *hex_num* modulo the AES irreducible when it exceeds one byte.

    Helper method for the russian_peas method.

    NOTE(review): this uses plain integer ``%`` with 0x11A, not GF(2^8)
    polynomial reduction (and the standard AES irreducible is 0x11B) —
    confirm this arithmetic is intended.
    """
    AES_IRREDUCIBLE = 0x11A
    return hex_num % AES_IRREDUCIBLE if hex_num > 0xFF else hex_num
def bool_to_int(matrix):
    """
    Translate a matrix of booleans to 0/1 integers in place (0s and 1s
    are easier to read than boolean words).

    :param matrix: a rectangular matrix (list of equal-length rows)
    :return: the same matrix, now holding integers
    """
    for r, row in enumerate(matrix):
        for c in range(len(matrix[0])):
            matrix[r][c] = int(row[c])
    return matrix
def pyfloat(v_str):
    """Convert a Fortran float literal (e.g. '1.5D3') to a Python double.

    Fortran's D exponent marker is mapped to E; there is no loss of
    information going from SP to DP floats.
    """
    normalized = v_str.lower()
    return float(normalized.replace('d', 'e'))
def construct_url(ip_address: str) -> str:
    """Construct a URL from a host/IP, defaulting the scheme to http.

    Any trailing slashes are stripped; an existing http/https scheme in
    the string is left untouched.
    """
    has_scheme = "http://" in ip_address or "https://" in ip_address
    if not has_scheme:
        ip_address = "http://" + ip_address
    return ip_address.rstrip("/")
def base_to_str(base):
    """Convert the numeric bases 0,1,2,3 to the letters A,C,G,T.

    Raises RuntimeError for any other value.
    """
    mapping = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}
    if base in mapping:
        return mapping[base]
    raise RuntimeError('Bad base: %d' % base)
def clean_white_spaces(string):
    """Remove all whitespace from *string*.

    Bytes input is decoded as UTF-8 first; undecodable bytes fall through
    and are processed as-is (each chunk is stringified, matching the old
    behaviour).

    :param string: string (or UTF-8 bytes) to be processed
    :return: the input with all whitespace removed
    """
    try:
        # in case it is a bytes value
        string = string.decode('utf-8')
    except (AttributeError, UnicodeDecodeError):
        # str has no .decode (Py3); undecodable bytes pass through —
        # narrowed from a bare except that also swallowed SystemExit etc.
        pass
    return ''.join(str(word) for word in string.split())
def _get_units(intuple):
"""
Extract Units from metadata.
>>> _get_units([("fmap.nii.gz", {"Units": "rad/s"})])
'rad/s'
>>> _get_units(("fmap.nii.gz", {"Units": "rad/s"}))
'rad/s'
"""
if isinstance(intuple, list):
intuple = intuple[0]
return intuple[1]["Units"] |
def build_fullauth_config(settings):
    """Build the fullauth configuration dictionary.

    Defaults are filled in first; any settings key starting with
    "fullauth" then overrides the matching nested entry
    ("fullauth.<section>[.<sub>].<key>").
    """
    config = {
        "authtkt": {
            "secret": settings.get("fullauth.authtkt.secret", "fullauth_psst"),
            "hashalg": settings.get("fullauth.authtkt.hashalg", "sha512"),
        },
        "session": {
            "factory": "pyramid.session.SignedCookieSessionFactory",
            "settings": {"secret": "THATS_NOT_SECRET_ITS_A_SECRET"},
        },
        "register": {"password": {"require": True, "length_min": 6, "confirm": True}},
        "redirects": {"logout": False},
        "login": {"cookie_max_age": 2592000},  # 30 days
    }
    for key, value in settings.items():
        if not key.startswith("fullauth"):
            continue
        parts = key.split(".")
        depth = len(parts)
        if parts[1] == "register" and depth == 4:
            if parts[2] == "password":
                config["register"]["password"][parts[3]] = value
        elif parts[1] == "authtkt" and depth == 3:
            config["authtkt"][parts[2]] = value
        elif parts[1] == "login" and depth == 3:
            config["login"][parts[2]] = value
        elif parts[1] == "session":
            if parts[2] == "factory" and depth == 3:
                config["session"]["factory"] = value
            elif parts[2] == "settings" and depth == 4:
                config["session"]["settings"] = value
        elif parts[1] == "social" and depth == 4:
            # social settings are nested per provider, created on demand
            provider = config.setdefault("social", {}).setdefault(parts[2], {})
            provider[parts[3]] = value
    return config
def shortened_interface(name):
    """Condense an interface name for brevity (not a canonical form)."""
    return name.replace("GigabitEthernet", "ge").replace("0/0/0/", "")
def normalize_baseuri(baseuri: str) -> str:
    """Normalize a baseuri.

    If it doesn't end in a slash, add one.  An empty string becomes "/"
    (the old ``baseuri[-1]`` check raised IndexError on empty input).
    """
    if baseuri.endswith("/"):
        return baseuri
    return baseuri + "/"
def primes(num):
    """
    Get a list of the first *num* prime numbers, by trial division
    against the primes found so far.

    :param num: Number of prime numbers (N).
    :type num: short, int, long
    :return: List of prime numbers, readable in Python.
    :rtype: list

    >>> primes(10)
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    """
    found = []
    candidate = 2
    while len(found) < num:
        # prime iff no smaller prime divides it
        if all(candidate % p for p in found):
            found.append(candidate)
        candidate += 1
    return found
def user_info(
        first_name,
        last_name,
        **profile):
    """Build a user-information dict from names plus extra keyword fields.

    Note: 'firstname'/'lastname' always overwrite same-named kwargs.
    """
    profile.update(firstname=first_name, lastname=last_name)
    return profile
def _read_to_str(path):
"""Read a file into a string"""
with open(path, "r") as f:
return f.read() |
def valid_block(top_row, mid_row, bot_row, pos):
    """
    Is the 3x3 pattern at a given position of a 3-row rectangle valid,
    i.e. does its centre cell map to 0 (dead) in Game of Life?
    """
    centre = mid_row[pos + 1]
    neighbours = (sum(top_row[pos:pos + 3])
                  + mid_row[pos] + mid_row[pos + 2]
                  + sum(bot_row[pos:pos + 3]))
    if centre:
        # a live cell dies unless it has exactly 2 or 3 neighbours
        return not (2 <= neighbours <= 3)
    # a dead cell stays dead unless it has exactly 3 neighbours
    return neighbours != 3
def linearSearch(myList, objetive, iter_lin=0):
    """
    Search the list iteratively for the objetive value.

    :return: (found, number of comparisons performed + iter_lin offset)
    """
    for element in myList:
        iter_lin += 1
        if element == objetive:
            return (True, iter_lin)
    return (False, iter_lin)
def get_duplicates(iterable):
    """Return a set of the elements which appear multiple times in iterable."""
    seen = set()
    duplicates = set()
    for item in iterable:
        # second-or-later sighting goes into duplicates, first into seen
        (duplicates if item in seen else seen).add(item)
    return duplicates
def get_items_markdown(items, indent=""):
    """Return a list of NEWS entries as a Markdown bullet list."""
    lines = ["%s* %s\n" % (indent, item) for item in items]
    return "".join(lines)
def kvret_get_constructed_history_and_golden_response(usr_utterances, sys_utterances):
    """
    Construct the reversed-order concatenation of the dialogue history,
    plus the golden (last) system response.

    Layout: "[utterance n] || [sys n-1] | [usr n-1] | [sys n-2] | [usr n-2] | ..."

    @param usr_utterances: user turns, oldest first
    @param sys_utterances: system turns, oldest first (same length)
    @return: (history string, golden system response)
    """
    assert len(usr_utterances) == len(sys_utterances)
    # pair each earlier system turn with its user turn, newest first
    pairs = zip(reversed(sys_utterances[:-1]), reversed(usr_utterances[:-1]))
    history_tail = " | ".join(s.strip() + " | " + u.strip() for s, u in pairs)
    history = usr_utterances[-1].strip() + " || " + history_tail
    return history, sys_utterances[-1]
def sql_concurrently(index):
    """
    Return "CONCURRENTLY" unless the index dict explicitly sets
    'concurrently' to False (literal False only; other falsy values do
    not disable it).

    :return: string
    """
    if index.get("concurrently", True) is False:
        return ""
    return "CONCURRENTLY"
def format_floats(data):
    """Format a list of floats to 3-decimal strings.

    Parameters
    ----------
    data : list
        The floats to be formatted.

    Returns
    -------
    list
        Formatted floats (now strings).
    """
    return ['%.3f' % elem for elem in data]
def paginate(items, items_per_page):
    """Split `items` into consecutive chunks of size `items_per_page`.

    >>> paginate([1, 2, 3], 1)
    [[1], [2], [3]]
    >>> paginate([1, 2, 3, 4, 5, 6, 7], 3)
    [[1, 2, 3], [4, 5, 6], [7]]
    >>> paginate([], 3)
    []
    """
    return [items[start:start + items_per_page]
            for start in range(0, len(items), items_per_page)]
def html(content, **kwargs):
    """HTML (Hypertext Markup Language)

    File-like objects (anything with ``read``) pass through unchanged;
    renderable objects (anything with ``render``) are rendered; everything
    else is stringified.  Output is UTF-8 bytes except for the
    pass-through case.
    """
    if hasattr(content, "read"):
        return content
    if hasattr(content, "render"):
        content = content.render()
    else:
        content = str(content)
    return content.encode("utf8")
def solution1(n):
    """
    Return the largest prime factor of *n*.

    (The original author's own solution — noted as complicated and slow;
    despite the docstring mentioning a list, only the maximum of the
    collected prime factors is returned.)
    """
    def _next_prime(candidate):
        """Smallest prime >= candidate, by trial division."""
        while True:
            for divisor in range(2, candidate):
                if not candidate % divisor:
                    break
            else:
                return candidate
            candidate += 1

    remaining = n
    factor = 1
    factors = []
    while remaining != 1:
        factor = _next_prime(factor + 1)
        while not remaining % factor:
            remaining /= factor  # true division, as in the original
            factors.append(factor)
    return max(factors)
def move_list_item(mod_list, item, new_pos):
    """Reposition *item* at index *new_pos* within *mod_list*, in place.

    :return: the mutated list, or None when *item* is absent (matching the
        original's implicit-None behaviour)
    """
    if item not in mod_list:
        return None
    mod_list.remove(item)  # removes the first occurrence
    mod_list.insert(new_pos, item)
    return mod_list
def floor2(i):
    """Find the greatest even integer less than or equal to *i*.

    Arguments:
        i (int): starting integer
    Returns:
        (int): greatest even integer
    """
    # Python's floor-mod gives i % 2 in {0, 1} even for negatives,
    # so subtracting 1 from any odd value is correct in both directions.
    return i if i % 2 == 0 else i - 1
def sort_objects_by_score(objects, reverse=True):
    """
    Order a collection of dicts by their 'score' key, highest score first
    by default (set reverse=False for ascending).
    """
    sign = -1 if reverse else 1
    return sorted(objects, key=lambda obj: sign * obj['score'])
def mmirror4(matrix):
    """Mirror the top-left quadrant of *matrix* 4 ways, in place.

    Each cell (i, j) is overwritten with the value of the nearest
    top-left-quadrant cell, giving horizontal and vertical symmetry.
    """
    height, width = len(matrix), len(matrix[0])
    for i, row in enumerate(matrix):
        for j in range(width):
            src_i = min(i, height - 1 - i)
            src_j = min(j, width - 1 - j)
            row[j] = matrix[src_i][src_j]
    return matrix
def check_fields(default_dict, new_dict):
    """
    Return the dictionary with default keys and values updated using the
    information of ``new_dict`` (only keys already present in the default
    dict are overwritten; the default dict is mutated).

    :param dict default_dict: dictionary with default values
    :param dict new_dict: dictionary with new values
    :return: dict.
    """
    for key, value in new_dict.items():
        if key in default_dict:
            default_dict[key] = value
    return default_dict
def format_frequency(value: str) -> str:
    """Format a frequency value to a more human-readable form.

    NOTE(review): currently only appends a trailing space (a unit was
    presumably meant to follow) — confirm the intended format.
    """
    return "{} ".format(value)
def format_color(r, g, b):
    """Format an RGB colour as a '#rrggbb' string.

    r, g, b -- channel values as fractions in [0, 1]; each is scaled by
    255 and truncated before formatting.
    """
    channels = (int(r * 255), int(g * 255), int(b * 255))
    return '#%02x%02x%02x' % channels
def has_new_status(metric, new_status: str, most_recent_measurement_seen: str) -> bool:
    """Determine if a metric got a new status after the timestamp of the
    most recent measurement seen."""
    measurements = metric.get("recent_measurements") or []
    if len(measurements) < 2:
        # fewer than two measurements: the metric can't have recently changed
        return False
    has_status = metric["status"] == new_status
    scale = metric["scale"]
    had_other_status = measurements[-2][scale]["status"] != new_status
    was_recent = measurements[-1]["start"] > most_recent_measurement_seen
    return bool(has_status and had_other_status and was_recent)
def is_json(metadata: str) -> bool:
    """Check whether the string looks like a JSON document (object/array).

    Only the first character is inspected; the empty string now returns
    False instead of raising IndexError (the old ``metadata[0]`` did).
    """
    return metadata.startswith(('{', '['))
def centre_to_box(centre_width):
    """Convert from centre-and-width notation to start/stop notation (in
    both x and y directions).

    Inputs:
        centre_width: sequence of (x_centre, y_centre, x_half, y_half) —
            per the arithmetic below, elements 2 and 3 act as half-widths
    Returns:
        [x_start, x_stop, y_start, y_stop]
    History:
        2020_10_28 | MEG | Wrote the docs.
    """
    x_centre, y_centre = centre_width[0], centre_width[1]
    x_half, y_half = centre_width[2], centre_width[3]
    return [x_centre - x_half, x_centre + x_half,
            y_centre - y_half, y_centre + y_half]
def find_defining_class(obj, method_name):
    """Find and return the class object that provides the definition of
    *method_name* (as a string) when it is invoked on *obj*.

    obj: any python object
    method_name: string method name
    Returns None when no class in the MRO defines the method.
    """
    for klass in type(obj).mro():
        if method_name in vars(klass):
            return klass
    return None
def is_numlike(obj):
    """Return True if obj looks like a number or numerical array.

    The probe simply tries ``obj + 1``; anything that raises is not
    number-like.  Narrowed from a bare ``except:`` so KeyboardInterrupt /
    SystemExit are no longer swallowed.
    """
    try:
        obj + 1
    except Exception:
        return False
    # all cool
    return True
def is_sorted(l, ascending=True):
    """
    Check whether a list is sorted (non-strictly).

    source: https://stackoverflow.com/questions/3755136/pythonic-way-to-check-if-a-list-is-sorted-or-not
    :param l: list
    :return: is sorted
    """
    pairs = zip(l, l[1:])
    if ascending:
        return all(a <= b for a, b in pairs)
    return all(a >= b for a, b in pairs)
def checkH(board, intX, intY, newX, newY):
    """Check if the horse (knight) move is legal; returns True if legal.

    *board* is unused but kept for interface compatibility.
    """
    dx = abs(intX - newX)
    dy = abs(intY - newY)
    # a knight moves 1 along one axis and 2 along the other
    return dx + dy == 3 and dx != 0 and dy != 0
def ktmetric(kt2_i, kt2_j, dR2_ij, p=-1, R=1.0):
    """
    kt-algorithm type distance measure: min(kt2_i^2p, kt2_j^2p) * dR2/R^2.

    Args:
        kt2_i : Particle 1 pt squared
        kt2_j : Particle 2 pt squared
        dR2_ij : Angular separation between particles squared (deta**2 + dphi**2)
        R : Radius parameter
        p : (p=1) kt-like, (p=0) Cambridge/Aachen, (p=-1) anti-kt like
    Returns:
        distance measure
    """
    angular = dR2_ij / R**2
    weight_i = kt2_i ** (2 * p)
    weight_j = kt2_j ** (2 * p)
    if weight_i < weight_j:
        return weight_i * angular
    return weight_j * angular
def flatten_proposal_field(representation, allow_fields=()):
    """
    Helper used in `to_representation()` to flatten the `proposal` object
    out of a serialized `ProposedTalkEvent` / `ProposedTutorialEvent`.

    :param representation: dict; its 'proposal' entry is popped and its
        items merged into the top level
    :param allow_fields: optional whitelist of proposal keys to copy;
        empty means copy everything.  Default changed from a mutable
        ``[]`` to an immutable tuple (same truthiness/membership
        behaviour, no shared-mutable-default hazard).
    :return: the mutated representation dict
    """
    proposal_repr = representation.pop('proposal')
    for key in proposal_repr:
        if key in allow_fields or not allow_fields:
            representation[key] = proposal_repr[key]
    return representation
def modus_ponens(p, q):
    """Implements the modus ponens logic table: p -> q.

    A false premise makes the implication hold vacuously (returns True);
    a true premise returns q itself.
    """
    return q if p else not p
def list2row(worksheet, row, col, values, positions=None):
    """
    Write *values* into one row of the template xlsx file.

    :param Worksheet worksheet: Worksheet object to write into
    :param int row: 0-based row to write (cells are addressed 1-based)
    :param int col: 0-based starting column
    :param list values: list of values to write in the row
    :param list positions: per-value column offsets (optional; when absent
        or mismatched in length, values land consecutively from offset 0)
    :return: the next position of the cursor as row,col
    :rtype: tuple of (int,int)
    """
    if not positions or len(positions) != len(values):
        positions = range(len(values))
    for value, offset in zip(values, positions):
        worksheet.cell(row=row + 1, column=col + offset + 1, value=value)
    return row + 1, col
def _score_for_model(meta):
""" Returns mean score between tasks in pipeline that can be used for early stopping. """
mean_acc = list()
pipes = meta["pipeline"]
acc = meta["accuracy"]
if "tagger" in pipes:
mean_acc.append(acc["tags_acc"])
if "parser" in pipes:
mean_acc.append((acc["uas"] + acc["las"]) / 2)
if "ner" in pipes:
mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3)
return sum(mean_acc) / len(mean_acc) |
def dumb_filter(line: str) -> bool:
    """Cheap string test on a raw Wikidata dump line.

    Avoids JSON-parsing every line (which is slow): keeps only item
    entities whose raw text contains no '"enwiki"' sitelink mention.
    NOTE(review): the original docstring claimed the opposite (entities
    *with* an English article); the code keeps lines *without* '"enwiki"'
    — confirm which is intended before changing behaviour.

    Arguments:
        line: ``str`` A line in the Wikidata dump.
    Returns:
        ``bool`` whether the line is an item entity lacking an enwiki mention
    """
    return '"type":"item"' in line and '"enwiki"' not in line
def align(value, alignment):
    """Align `value` upward towards the nearest multiple of `alignment`."""
    padded = value + alignment - 1
    return (padded // alignment) * alignment
def calc_intersection_with_chroma_axis(inner_cusp, outer_cusp):
    """
    Calculate where the line through the two cusps crosses the chroma
    axis (L* = 0) in the L*-Chroma plane.

    Each cusp is given as (L*, Chroma).

    Returns
    -------
    tuple
        (L*, Chroma) — the coordinate of the L_cusp; chroma falls back to
        0 when both cusps share the same L* (degenerate case).
    """
    l_inner, c_inner = inner_cusp[0], inner_cusp[1]
    l_outer, c_outer = outer_cusp[0], outer_cusp[1]
    delta_l = l_outer - l_inner
    if delta_l == 0:
        return (0, 0)
    chroma = c_outer - (c_outer - c_inner) / delta_l * l_outer
    return (0, chroma)
def standardize(obj):
    """
    Take any Deezer obj and return a standard object (e.g. the
    provider-specific 'picture_medium'/'cover_medium' keys are renamed to
    the generic 'image').  The dict is mutated in place and returned.
    """
    # `key in obj` instead of `key in obj.keys()`; both branches deduped.
    # As before, when both keys are present the cover wins (applied last).
    for medium_key in ('picture_medium', 'cover_medium'):
        if medium_key in obj:
            obj['image'] = obj.pop(medium_key)
    return obj
def t(milliseconds: int) -> str:
    """Convert a duration in milliseconds to a beautified string."""
    seconds, ms = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    for amount, unit in ((days, "Days"), (hours, "Hours"),
                         (minutes, "Minutes"), (seconds, "Seconds"),
                         (ms, "ms")):
        if amount:  # zero components are omitted entirely
            parts.append("{} {}".format(amount, unit))
    return ", ".join(parts)
def _break(s, find):
"""Break a string s into the part before the substring to find,
and the part including and after the substring."""
i = s.find(find)
return s[:i], s[i:] |
def j2_quote(s):
    """Jinja2 custom filter that wraps a value in double quotes."""
    return '"%s"' % (s,)
def get_username(current_user):
    """
    Return the current username, or None when the user has logged out.

    A logged-out user (e.g. ``current_user`` is None or lacks the
    attribute) raises AttributeError on the lookup; only that is
    swallowed — narrowed from a bare ``except:`` so real errors are no
    longer hidden.
    """
    try:
        return current_user.username
    except AttributeError:
        return None
def get_reformatted_target_dir(path: str):
    """Get the reformatted target dir with a trailing '/'.

    Args:
        path: (str): Original path.
    Returns:
        str: Reformatted path.
    """
    return path if path.endswith("/") else path + "/"
def match(texts, substr):
    """Text match: keep the texts containing *substr*.

    (Ctrl/command + F for now.)
    """
    return [text for text in texts if substr in text]
def sqrt(number):
    """
    Calculate the floored square root of a number by binary search.

    Args:
        number(int): Number to find the floored square root of.  Non-int
            input gets an error string back; negative input never enters
            the loop and implicitly returns None (preserved behaviour).
    Returns:
        int: Floored Square Root
    """
    if type(number) is not int:
        return "INVALID INPUT, ENTER AN INTEGER!"
    lo, hi = 0, number + 1
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if mid * mid == number:
            return mid
        if hi - lo == 1:
            # interval narrowed to one candidate: lo is the floor root
            return lo
        if mid * mid > number:
            hi = mid
        else:
            lo = mid
def portrange2str(port_range):
    """Render a (low, high) port tuple as text.

    A falsy high port (None/0) means a single port.
    """
    low, high = port_range[0], port_range[1]
    if high:
        return "port range %s-%s" % (low, high)
    return "port %s" % (low,)
def get_missing_residues(uniprot_seq, pdb_seq):
    """
    Compare the structural (PDB) and UniProt sequences to identify
    residues present in UniProt but missing from the structure.

    Walks both sequences in lockstep: a match advances both indices, a
    mismatch records the UniProt residue as missing and advances only the
    UniProt index.

    NOTE(review): this assumes pdb_seq is an in-order subsequence of
    uniprot_seq; if mismatches exhaust pdb_seq early, the pdb index can
    run past the end — confirm inputs guarantee this cannot happen.

    Arguments:
        uniprot_seq: the UniProt sequence.
        pdb_seq: the PDB sequence.
    Returns:
        list of dicts {"location": uniprot index, "residue": letter} for
        each residue absent from the structure.
    """
    # (A large commented-out draft of this routine was removed.)
    uniprot_seq_index = 0
    pdb_seq_index = 0
    results = []
    for _ in range(len(uniprot_seq)):
        if uniprot_seq[uniprot_seq_index] == pdb_seq[pdb_seq_index]:
            uniprot_seq_index += 1
            pdb_seq_index += 1
        else:
            results.append({
                "location": uniprot_seq_index,
                "residue": uniprot_seq[uniprot_seq_index]
            })
            uniprot_seq_index += 1
    return results
def _1(value):
"""
Integer decoder.
```python
>>> from Tyf import decoders
>>> decoders._1((1, ))
1
>>> decoders._1((1.0, "6", 0b111))
(1, 6, 7)
```
"""
return int(value[0]) if len(value) == 1 else tuple(int(v) for v in value) |
def SplitPatch(data):
    """Splits a patch into separate pieces for each file.

    Args:
        data: A string containing the output of svn diff.

    Returns:
        A list of 2-tuple (filename, text) where text is the svn diff
        output pertaining to filename.
    """
    patches = []
    current_name = None
    current_lines = []

    for line in data.splitlines(True):
        next_name = None
        if line.startswith('Index:'):
            next_name = line.split(':', 1)[1].strip()
        elif line.startswith('Property changes on:'):
            # When a file is modified, paths use '/' between directories,
            # but a property change uses '\' on Windows.  Normalize so the
            # same file does not show up twice.
            prop_name = line.split(':', 1)[1].strip().replace('\\', '/')
            if prop_name != current_name:
                # Property changes with no modifications start a new diff.
                next_name = prop_name
        if next_name:
            if current_name and current_lines:
                patches.append((current_name, ''.join(current_lines)))
            current_name = next_name
            current_lines = [line]
            continue
        current_lines.append(line)

    if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
    return patches
def map_1d_to_2d(index, N, M):
    """
    Function: map_1d_to_2d

    Map a 1-D index onto 2-D grid coordinates.

    NOTE(review): the row uses ``N - 1`` while the column uses ``M`` —
    this asymmetry looks suspicious for an N x M grid; confirm intended.

    Parameter: index -> index to map, N -> number of rows in the grid,
        M -> number of columns in the grid
    Return: the (row, col) location of the mapped index
    """
    return (index // (N - 1), index % M)
def update_cv_validation_info(test_validation_info, iteration_validation_info):
    """
    Append each iteration metric value onto the accumulated per-metric
    lists; a None/empty accumulator starts fresh.
    """
    accumulated = test_validation_info or {}
    for metric, value in iteration_validation_info.items():
        accumulated.setdefault(metric, []).append(value)
    return accumulated
def hflip_pattern(pattern):
    """Flip a pattern horizontally (reverse each row string)."""
    return [row[::-1] for row in pattern]
def get_curr_max_virtual_slots_cmd(lparid=1):
    """
    Build the HMC command that reports the current max virtual slots
    limit for the LPAR.

    :param lparid: LPAR ID (not needed for IVM)
    :returns: an HMC command querying the maximum number of virtual slots
        configurable on the VIOS
    """
    command = ("lshwres -r virtualio --rsubtype slot --level lpar "
               "--filter lpar_ids=%(lparid)s -F curr_max_virtual_slots")
    return command % {'lparid': lparid}
def escape_ntfs_invalid(name):
    """
    Escape characters which are forbidden in NTFS but not in ext4.

    Uses str.translate for a single C-level pass instead of seven chained
    .replace() calls; the replacement strings contain none of the escaped
    characters, so the result is identical.

    :param name: Path potentially containing forbidden NTFS characters.
    :return: Path with forbidden NTFS characters escaped.
    """
    table = str.maketrans({
        '*': '#002A', '|': '#007C', ':': '#003A', '>': '#003E',
        '<': '#003C', '?': '#003F', '"': '#0022',
    })
    return name.translate(table)
def mock_platform_list_2():
    """Fixture: a platform list whose system/version match nothing mocked."""
    macos = {"name": "macos", "platform_info": {"system": "test1", "version": "test3"}}
    ubuntu = {"name": "ubuntu", "platform_info": {"system": "test2", "version": "test4"}}
    return [macos, ubuntu]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.