content
stringlengths
42
6.51k
def brightness_to_rflink(brightness: int) -> int:
    """Map a 0-255 brightness value onto the RFLink dim scale (0-15)."""
    scaled = brightness / 17
    return int(scaled)
def _parse_fit_and_predict_result(result): """Parse and infer whether fit_and_predict returns y or (y, scores).""" if len(result) > 1 and result[1] and not isinstance(result[1], str): # Scores object does not resemble a label prediction (always string) y = result[0] scores = result[1] else: y = result scores = None return y, scores
def sortedbytype(iterable):
    """Sort an iterable by type name first, then by value within each type."""
    by_type = {}
    for value in iterable:
        by_type.setdefault(type(value).__name__, []).append(value)
    result = []
    for type_name in sorted(by_type):
        result.extend(sorted(by_type[type_name]))
    return result
def check_patch_in_bounds(x, y, X_dim, Y_dim):
    """Determine whether the patch box lies inside the image bounds.

    Args:
        x: (x_start, x_end) of the patch
        y: (y_start, y_end) of the patch
        X_dim: (Image_X_start, Image_X_end)
        Y_dim: (Image_Y_start, Image_Y_end)

    Returns:
        True when every range is well-formed and the patch start is inside
        (inclusive) while the patch end is strictly inside the image.
    """
    for interval in (x, y, X_dim, Y_dim):
        if interval[0] > interval[1]:
            return False
    inside_x = x[0] >= X_dim[0] and x[1] < X_dim[1]
    inside_y = y[0] >= Y_dim[0] and y[1] < Y_dim[1]
    return inside_x and inside_y
def cache_max_age(hours):
    """Build a Cache-Control ``max-age`` directive for the given hours."""
    return 'max-age={}'.format(hours * 3600)
def choose_cat_fasta(fofn):
    """Given the contents of a fasta FOFN, return a command prefix to write
    the fasta to stdout, keeping the original file.

    >>> choose_cat_fasta('abc.gz')
    'zcat '
    >>> choose_cat_fasta('abc.dexta')
    'undexta -vkU -w60 -i < '
    >>> choose_cat_fasta('abc')
    'cat '
    """
    first_line = fofn.splitlines()[0]
    commands_by_suffix = (
        ('.gz', 'zcat '),
        ('.dexta', 'undexta -vkU -w60 -i < '),
    )
    for suffix, command in commands_by_suffix:
        if first_line.endswith(suffix):
            return command
    return 'cat '
def get_delay_loaded_timestamp(pefile_object):
    """Collect timestamps from the Delay Load Import Table directory.

    :param pefile.PE pefile_object: pefile object.
    :return: list of (dll_name, timestamp) tuples; empty when the PE has no
        delay-load directory or no non-zero timestamps.
    :rtype: list
    """
    timestamps = []
    if not hasattr(pefile_object, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
        return timestamps
    for entry in pefile_object.DIRECTORY_ENTRY_DELAY_IMPORT:
        name = entry.dll
        try:
            name = name.decode('ascii')
        except UnicodeDecodeError:
            # Fall back to replacement chars for non-ASCII dll names.
            name = name.decode('ascii', errors='replace')
        stamp = getattr(entry.struct, 'dwTimeStamp', 0)
        if stamp:
            timestamps.append((name, stamp))
    return timestamps
def is_dict(value):
    """Check whether `value` is a ``dict`` (including dict subclasses).

    Example:

        >>> is_dict({})
        True
        >>> is_dict([])
        False
    """
    return isinstance(value, dict)
def getnested(instance, props, callback=None):
    """Resolve a nested attribute chain on `instance`.

    For `instance.a.b.c`, pass props=['a', 'b', 'c'].  `callback`, when
    given, is applied to the final resolved value.

    Parameters
    ----------
    instance: object that has a property `props[0]`
    props: nested property path split into a list of names
    callback: optionally applied to the output value
    """
    target = instance
    for name in props:
        target = getattr(target, name)
    return callback(target) if callback else target
def split_chunks(lst: list, size: int = 100) -> "list[list]":
    """Split `lst` into consecutive chunks of at most `size` elements.

    `lst`: the list of ids
    `size`: max length of each inner list (API limit)
    """
    return [lst[start:start + size] for start in range(0, len(lst), size)]
def escapejson(string):
    """Escape `string`, assumed to be syntactically valid JSON (not
    verified), so it is safe inside HTML <script> blocks and as literal
    javascript.
    """
    # '/' -> '\\/' blocks '</script>' injection; U+2028/U+2029 are valid
    # JSON but invalid javascript line terminators
    # (http://timelessrepo.com/json-isnt-a-javascript-subset/).
    for target, escaped in (('/', '\\/'),
                            (u'\u2028', '\\u2028'),
                            (u'\u2029', '\\u2029')):
        string = string.replace(target, escaped)
    return string
def _convertType(iterable, newType): """ Converts element type within iterable. """ iterType = type(iterable) return iterType([newType(i) for i in iterable])
def internalfunc(data1, data2):
    """Used by binleave. This function interleaves data2 into data1 a little
    chunk at a time.

    Returns (header, interleaved, remainder): a 3-char length prefix for
    data2, the bit-interleaved payload, and the untouched tail of data1.
    Returns None when either input is empty or len(data2) >= 2**24.
    NOTE(review): reconstructed from flattened single-line source — the
    exact indentation of the inner bookkeeping statements should be
    confirmed against the original layout.
    """
    if len(data2) > len(data1):
        # make sure that data1 always has the longer string, making data2 the watermark
        dummy = data1
        data1 = data2
        data2 = dummy
    if not data2 or not data1:
        return None  # check for empty data
    length = len(data2)
    if length >= pow(2, 24):
        return None  # if the strings are oversized
    multiple = len(data1) // length  # this is how often we should interleave bits
    if multiple > 65535:
        multiple = 65535  # in practise we'll set to max 65535
    header1 = length // 65536
    header3 = length % 65536
    header2 = header3 // 256
    header3 = header3 % 256
    # these are the 3 bytes we will put at the start of the string
    header = chr(header1) + chr(header2) + chr(header3)
    # so - to encode one byte of data2 (the watermark) we need multiple bytes of data1
    data1 = [ord(char) for char in list(data1)]
    startpos = 0
    data2 = [ord(char) for char in list(data2)]
    BINLIST = [1, 2, 4, 8, 16, 32, 64, 128]  # single-bit masks, LSB first
    out = []
    bitlen = multiple * 8 + 8  # the total number of bits we'll have
    while data2:
        chunklist = data1[startpos:startpos + multiple]
        startpos = startpos + multiple
        heapobj = 0                 # byte being assembled
        mainobj = data2.pop(0)      # current watermark byte
        charobj = chunklist.pop(0)  # current carrier byte
        bitindex = 0                # bit position in the watermark byte
        mainindex = 0               # overall bit counter for this chunk
        heapindex = 0               # bit position in the assembled byte
        charindex = 0               # bit position in the carrier byte
        while mainindex < bitlen:
            if heapindex == 8:  # if we've got all 8 bit's
                out.append(chr(heapobj))
                heapobj = 0
                heapindex = 0
            if not mainindex % (multiple + 1):
                # we've got to a point where we should nick another bit from the byte
                if mainobj & BINLIST[bitindex]:  # if the bit at bitindex is set
                    heapobj = heapobj | BINLIST[heapindex]  # set the bit at heapindex
                heapindex += 1
                bitindex += 1
                mainindex += 1
                continue
            if charindex == 7 and chunklist:
                # we've used up the current character from the chunk
                if charobj & BINLIST[charindex]:
                    heapobj = heapobj | BINLIST[heapindex]
                charobj = chunklist.pop(0)
                charindex = 0
                heapindex += 1
                mainindex += 1
                continue
            if charobj & BINLIST[charindex]:
                heapobj = heapobj | BINLIST[heapindex]
            heapindex += 1
            charindex += 1
            mainindex += 1
    if heapindex == 8:  # if we've got all 8 bit's.. but the loop has ended...
        out.append(chr(heapobj))
    return header, ''.join(out), ''.join([chr(char) for char in data1[startpos:]])
def get_from_json(data, key):
    """Retrieve a value from a json object.

    :param data: json object
    :param key: key of value to retrieve
    :return: the value when present, None when the key is absent
    """
    try:
        return data[key]
    except KeyError:
        return None
def getWebImgType_stream(stream):
    """Get the image file type of an image stream.

    Can be used to verify if an image is available for web.

    :param stream: bytes of the file to inspect (only the first 11 matter).
    :return: 'jpeg', 'png' or 'gif'; False when the signature is unknown.
    """
    data = stream[:11]
    # JPG: JFIF APP0 marker
    if data[:4] == b'\xff\xd8\xff\xe0' and data[6:11] == b'JFIF\x00':
        return 'jpeg'
    # PNG: signature prefix
    if data[:6] == b'\x89PNG\r\n':
        return 'png'
    # GIF: 'GIF8' — the original compared data[:3] (3 bytes) against this
    # 4-byte literal, which could never match; slice 4 bytes.
    if data[:4] == b'\x47\x49\x46\x38':
        return 'gif'
    # Format not recognised
    return False
def sum_rec_edgecenter_idx(x, i, j):
    """Sum x[i..j] inclusive by pairing outer elements inward; 0 when i > j."""
    total = 0
    lo, hi = i, j
    while lo < hi:
        total += x[lo] + x[hi]
        lo += 1
        hi -= 1
    if lo == hi:
        # Odd-length range: the centre element is added once.
        total += x[lo]
    return total
def servo_duty_cycle(pulse_ms, frequency = 60):
    """Work out the 16-bit servo duty cycle for a pulse width.

    :param pulse_ms: pulse width
    :param frequency: PWM frequency in Hz (default 60)
    """
    period_ms = 1000.0 / frequency
    counts_per_ms = period_ms / 65535.0
    return int(pulse_ms / 1000 / counts_per_ms)
def get_card_at_top_index(deck):
    """(list of int) -> int

    Using the value of the top card as an index, return the card in the
    deck at that index.  If the top card is the big joker (the maximum),
    use the value of the small joker (max - 1) instead.

    >>> get_card_at_top_index([1, 2, 3, 4, 5, 6, 7, 10, 9, 8])
    2
    >>> get_card_at_top_index([5, 6, 7, 8, 9, 10, 14, 1, 2, 3, 4, 13, 11, 12])
    10
    """
    top = deck[0]
    biggest = max(deck)
    index = biggest - 1 if top == biggest else top
    return deck[index]
def pipe(value, *functions, funcs=None):
    """pipe(value, f, g, h) == h(g(f(value)))

    `funcs`, when truthy, takes priority over the positional functions.
    """
    chain = funcs if funcs else functions
    for fn in chain:
        value = fn(value)
    return value
def missing_no(nums: list) -> int:
    """Find the one number in 0..100 missing from `nums`."""
    difference = set(nums) ^ set(range(101))
    return next(iter(difference))
def _parse_two_level_dict(dat): """Get hierarchical list information from queued message Args: dat (bytes): received message data Returns: dict: dictionary of information list """ results = dict() count = int.from_bytes(dat[:2], 'big') ptr = 2 for i in range(count): first_id = dat[ptr:ptr+32] ptr += 32 results[first_id] = list() count2 = int.from_bytes(dat[ptr:ptr+2], 'big') ptr += 2 for j in range(count2): second_id = dat[ptr:ptr+32] ptr += 32 results[first_id].append(second_id) return results
def findlevel(levels, indent):
    """Drop all levels with deeper indentation than `indent` (mutates
    `levels` in place), then return the level this paragraph belongs to:
    the level whose indentation matches exactly, else max(level) - 1,
    or 0 when no levels remain."""
    for key in list(levels):
        if levels[key] > indent:
            del levels[key]
    if not levels:
        return 0
    for key, level_indent in levels.items():
        if level_indent == indent:
            return key
    return max(levels) - 1
def fahrenheit_to_celsius(fahrenheit):
    """Convert a temperature from Fahrenheit to Celsius."""
    return (fahrenheit - 32.) * (5. / 9.)
def find_2_integers_multiplies_to_number(numbers, goal):
    """Find two integers in `numbers` whose product equals `goal`.

    Time: O(n)  Space: O(n)

    :param numbers: iterable of integers
    :param goal: target product
    :return: the last matching pair found as (n, goal // n), or None.
    """
    seen = set()
    result = None
    for n in numbers:
        # n must divide goal exactly; the previous `goal // n in seen`
        # check alone accepted pairs whose product only approximated
        # goal (e.g. [3, 10] with goal 31 reported (10, 3)).
        if n != 0 and goal % n == 0 and goal // n in seen:
            result = (n, goal // n)
        else:
            seen.add(n)
    return result
def get_correct_cpg_transcript(vep_csq_records):
    """Pick the most relevant VEP CSQ record index for cancer predisposition.

    Considers an array of VEP CSQ records and picks the most relevant
    consequence (and gene) from neighbouring genes/transcripts of relevance
    for cancer predisposition (cpg = cancer predisposition gene).

    :param vep_csq_records: list of dict-like CSQ records
    :return: index into vep_csq_records of the chosen consequence
    NOTE(review): reconstructed from flattened source — the nesting of the
    SYMBOL block under the predisposition-source check should be confirmed
    against the original layout.
    """
    csq_idx = 0
    if len(vep_csq_records) == 1:
        return csq_idx
    ## some variants are assigned multiple transcript consequences
    ## if cancer predisposition genes are in the vicinity of other genes, choose the cancer predisposition gene
    ## if there are neighbouring cancer-predisposition genes, choose custom gene, preferring coding change
    ## (see below, KLLN/PTEN, XPC/TMEM43, NTHL1/TSC2)
    csq_idx_dict = {}
    for g in ['KLLN','PTEN','XPC','TMEM43','NTHL1','TSC2']:
        csq_idx_dict[g] = {}
        csq_idx_dict[g]['idx'] = -1        # record index seen for this gene (-1 = none)
        csq_idx_dict[g]['coding'] = False  # whether that record is a coding change
    j = 0
    while j < len(vep_csq_records):
        if 'CANCER_PREDISPOSITION_SOURCE' in vep_csq_records[j].keys() or 'GE_PANEL_ID' in vep_csq_records[j].keys():
            csq_idx = j
            if 'SYMBOL' in vep_csq_records[j].keys():
                if vep_csq_records[j]['SYMBOL'] in csq_idx_dict.keys():
                    csq_idx_dict[str(vep_csq_records[j]['SYMBOL'])]['idx'] = j
                    if vep_csq_records[j]['CODING_STATUS'] == 'coding':
                        csq_idx = j  # prefer coding on over anything else
                        csq_idx_dict[str(vep_csq_records[j]['SYMBOL'])]['coding'] = True
        j = j + 1
    # Resolve neighbouring gene pairs, preferring the coding partner.
    if csq_idx_dict['KLLN']['idx'] != -1 and csq_idx_dict['PTEN']['idx'] != -1:
        csq_idx = csq_idx_dict['PTEN']['idx']
        if csq_idx_dict['KLLN']['coding'] is True:
            csq_idx = csq_idx_dict['KLLN']['idx']
    if csq_idx_dict['XPC']['idx'] != -1 and csq_idx_dict['TMEM43']['idx'] != -1:
        csq_idx = csq_idx_dict['XPC']['idx']
        if csq_idx_dict['TMEM43']['coding'] is True:
            csq_idx = csq_idx_dict['TMEM43']['idx']
    if csq_idx_dict['TSC2']['idx'] != -1 and csq_idx_dict['NTHL1']['idx'] != -1:
        csq_idx = csq_idx_dict['TSC2']['idx']
        if csq_idx_dict['NTHL1']['coding'] is True:
            csq_idx = csq_idx_dict['NTHL1']['idx']
    if csq_idx is None:  # defensive; csq_idx is always an int by this point
        csq_idx = 0
    return csq_idx
def _convert_to_dict(caps_str: str) -> dict: """ Parses the VCP capabilities string to a dictionary. Non continuous capabilities will include an array of all supported values. Returns: Dict with all capabilities in hex Example: Expected string "04 14(05 06) 16" is converted to:: { 0x04: [], 0x14: [0x05, 0x06], 0x16: [], } """ if len(caps_str) == 0: # Sometimes the keys arent found and the extracting of # capabilities returns an empty string. return {} result_dict = {} group = None prev_digit = None for chunk in caps_str.replace("(", " ( ").replace(")", " ) ").split(" "): if chunk == "": continue elif chunk == "(": group = prev_digit elif chunk == ")": group = None else: val = int(chunk, 16) if group is None: result_dict[val] = [] else: result_dict[group].append(val) prev_digit = val return result_dict
def rdist3(x, y):
    """Squared Euclidean distance between two 3-component points."""
    d0 = x[0] - y[0]
    d1 = x[1] - y[1]
    d2 = x[2] - y[2]
    return d0 * d0 + d1 * d1 + d2 * d2
def lum(c):
    """Return luminosity as ImageMath operands.

    Lum(C) = 0.3 x Cred + 0.59 x Cgreen + 0.11 x Cblue
    See: https://www.w3.org/TR/compositing-1/#blendingnonseparable

    Arguments:
        c: a tuple/list of 3 ImageMath operands (the color).

    Returns:
        The luminosity operand.
    """
    red, green, blue = c
    return red * .3 + green * .59 + blue * .11
def convective_overturn_time(VKs):
    """Convective overturn time from the Wright 2011 V-K colour relation.

    Linear below V-K = 3.5, quadratic above.
    """
    if VKs < 3.5:
        return 0.73 + 0.22 * VKs
    return -2.16 + 1.50 * VKs - 0.13 * VKs ** 2
def load_coin_value(file_name):
    """Load the starting value of a coin recorded in '<file_name>spy.txt'.

    Reads the first line and returns the text between the second character
    and the first space.  NOTE(review): the leading character is skipped —
    presumably a currency symbol; confirm against the file format.
    Returns False when the file cannot be read.
    """
    try:
        with open(file_name + 'spy.txt', 'r') as handle:
            first_line = handle.readline()
    except EnvironmentError:
        return False
    space_at = first_line.find(" ")
    return first_line[1:space_at]
def get_match_matrix_row(num_columns: int, matched_columns: list) -> list:
    """Build a binary row vector of length `num_columns`.

    All elements are zero except those with indices listed in
    `matched_columns`, which are set to one.

    :param num_columns: length of the created list
    :param matched_columns: indices that should equal 1 (may be None/empty)
    :return: binary list
    """
    row = [0] * num_columns
    for column in matched_columns or ():
        row[column] = 1
    return row
def preprocess(X):
    """Optional preprocessing for the secondary data format.

    Flattens each glog (list of function calls, each a list of tokens)
    into a single newline-joined string.  Mutates X in place and returns it.

    @param X: list of lists of lists (glogs, function calls, call + attributes)
    """
    for index, glog in enumerate(X):
        calls = [' '.join(str(token) for token in call) for call in glog]
        X[index] = '\n'.join(calls)
    return X
def remove_oneof(data):
    """Strip the 'oneOf' key from a dict and recurse into dict values.

    Mutates `data` in place and returns it.
    """
    data.pop('oneOf', None)
    for value in data.values():
        if isinstance(value, dict):
            remove_oneof(value)
    return data
def hex_to_rgb(value):
    """Convert a hex colour to RGB.

    value: string of 6 hex characters, optionally prefixed with '#'.
    Returns: tuple of 3 RGB values.
    """
    value = value.strip("#")  # removes hash symbol if present
    step = len(value) // 3
    return tuple(int(value[i:i + step], 16) for i in range(0, len(value), step))
def es_compat_hits_total(resp):
    """Return the total hit count from a search response, ES6 or ES7 shape.

    ES7 nests the value under ["hits"]["total"]["value"]; ES6 stores the
    integer directly.  It is the call site's responsibility to set
    `track_total_hits` in ES7 for an exact number.
    """
    total = resp["hits"]["total"]
    try:
        return total["value"]  # ES7
    except TypeError:
        return total  # ES6: total is already an int
def polyXY2(x, y, coeff):
    """XY quadratic with cross-terms.

       | - | x | x2|
    ---+---+---+---+
     - | a | b | d |
     y | c | e | g |
     y2| f | h | i |
    """
    a, b, c, d, e, f, g, h, i = coeff
    cross = x * y * (e + x * g + y * h + x * y * i)
    return a + x * (b + x * d) + y * (c + y * f) + cross
def get_tf_config(worker_num, index, ports, machine_list=None):
    """Generate a TF_CONFIG environment assignment for one worker.

    :param worker_num: number of local workers (used when machine_list is None)
    :param index: this worker's index in the cluster
    :param ports: list of local ports, one per worker
    :param machine_list: optional comma-separated host:port list; entries
        are quoted unless they already start with a quote
    """
    if machine_list is not None:
        hosts = []
        for host in machine_list.split(","):
            hosts.append(host if host.startswith('"') else '"{}"'.format(host))
    else:
        hosts = ['"127.0.0.1:{}"'.format(ports[i]) for i in range(worker_num)]
    return ('TF_CONFIG=\'{"cluster":{"worker":[' + ','.join(hosts)
            + ']},"task":{"type":"worker","index":' + str(index) + '}}\'')
def findCumLabel(row, cv=5):
    """Majority-vote label for a row across `cv` cross-validation models.

    Only models flagged with top_stat_<i> == 1 vote; ties (or no votes)
    yield "Unlabelled".
    """
    votes = [
        row["Label_{}".format(i)]
        for i in range(cv)
        if row["top_stat_{}".format(i)] == 1
    ]
    tsg_votes = votes.count("TSG")
    og_votes = votes.count("OG")
    if tsg_votes > og_votes:
        return "TSG"
    if og_votes > tsg_votes:
        return "OG"
    return "Unlabelled"
def greater_than_something(num1, num2):
    """Report whether the first number is strictly greater than the second.

    >>> greater_than_something(2,3)
    False
    >>> greater_than_something(-4, 4)
    False
    >>> greater_than_something(-3, -5)
    True
    """
    return num1 > num2
def check_bit(number, index):
    """See if the bit at `index` in the binary representation of `number`
    is on."""
    shifted = number >> index
    return bool(shifted & 1)
def is_transform(str_in):
    """Determine whether str_in names a transformation file (.ima or .trm)."""
    return any(marker in str_in for marker in ('.ima', '.trm'))
def need_generate_func(func_line):
    """Decide whether a declaration line needs code generation.

    Skips '= default' / '= delete' declarations, typedefs and
    using-aliases.

    :param func_line: one source line containing a declaration
    :return: True when code should be generated for the line
    """
    stripped = func_line.strip()
    if stripped.endswith(("default", "delete")):
        return False
    if stripped.startswith(("typedef", "using")):
        return False
    return True
def divide_by_scalar(lst, s):
    """Return a new list with every element of `lst` divided by scalar `s`."""
    divisor = float(s)
    return [element / divisor for element in lst]
def isoddv1(n):
    """Report whether n is odd.

    >>> isoddv1(3)
    True
    """
    return n % 2 != 0
def standard_immersionfactor(nwater, ng):
    """Immersion factor for small angles, considering only transmittance at
    the water(air)-glass interface.  Equation 9 of Zibordi (2006).

    :param nwater: refractive index of water
    :param ng: refractive index of the glass window
    """
    numerator = nwater * (nwater + ng) ** 2
    return numerator / (1 + ng) ** 2
def sqlite_table_indexes(c=None, table=None):
    """Collect index metadata for `table` via SQLite PRAGMAs.

    :param c: cursor whose rows support mapping access (e.g. sqlite3.Row)
    :param table: table name to inspect
    :return: {index_name: {'infos': index_list_row,
                           'composed_of': [index_info_rows]}}
    """
    indexes = {}
    if c is None or table is None:
        return indexes
    c.execute('PRAGMA index_list({0})'.format(table))
    for row in c.fetchall():
        indexes[row['name']] = {'infos': row}
    for name in list(indexes):
        c.execute('PRAGMA index_info({0})'.format(name))
        indexes[name]['composed_of'] = list(c.fetchall())
    return indexes
def timecode_convert(timecode):
    """Convert an 'HH:MM:SS', 'MM:SS' or 'SS' timecode to total seconds.

    Returns None for any other number of colon-separated fields.
    """
    parts = timecode.split(':')
    if len(parts) == 1:
        return int(parts[0])
    if len(parts) == 2:
        return int(parts[0]) * 60 + int(parts[1])
    if len(parts) == 3:
        return int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
def build_category_index(data):
    """Build index↔category mappings from a list of samples.

    Args:
        data: list of dicts, each dict one sample with a 'category' key.

    Returns:
        (idx2cat, cat2idx) over the sorted set of categories.
    """
    categories = sorted({sample['category'] for sample in data})
    idx2cat = dict(enumerate(categories))
    cat2idx = {category: index for index, category in idx2cat.items()}
    return idx2cat, cat2idx
def _arraylen(seq): """If seq is None, return 0, else len(seq). CPLEX often requires a count argument to specify the length of subsequent array arguments. This function allows us to return a length of 0 for None (i.e., NULL) arrays. """ if seq is None: return 0 return len(seq)
def heptagonalNum(n):
    """Return the nth heptagonal number, n*(5n - 3)/2.

    Uses pure integer arithmetic: n*(5n - 3) is always even, so floor
    division is exact — unlike the previous int(n*(5*n-3)/2), which went
    through float division and lost precision for large n.
    """
    return n * (5 * n - 3) // 2
def read_pathways_text(pathways_text):
    """Parse a pathways string into a list of pathway blocks.

    Blocks are separated by '//'; lines starting with '>' are dropped and
    empty pathways are filtered out.
    """
    pathways = []
    for block in pathways_text.split("//"):
        reactions = [
            line
            for line in block.strip().split("\n")
            if not line.startswith(">")
        ]
        pathways.append("\n".join(reactions))
    return [pathway for pathway in pathways if pathway]
def shortlabel(label):
    """First line of a graph node label, with '\\l' treated as a newline.

    Falsy labels (None, '') are returned unchanged.
    """
    if not label:
        return label
    return label.replace('\\l', '\n').splitlines()[0]
def sysctl_devname(devname):
    """Translate a device name to the form used with sysctl.

    :param str devname: device name to translate ('a.b' -> 'a/b');
        None passes through unchanged
    :return: translated device name
    :rtype: str
    """
    return None if devname is None else devname.replace(".", "/")
def building_class(BIM):
    """HAZUS building class for a wood building.

    Parameters
    ----------
    BIM: dictionary
        Information about the building characteristics
        (roof_shape, stories, area).

    Returns
    -------
    bldg_class: str
        'WSF' for non-flat-roof buildings of at most 2 stories and under
        2000 sq ft; 'WMUH' otherwise.
    """
    is_single_family = (BIM['roof_shape'] != 'flt'
                        and BIM['stories'] <= 2
                        and BIM['area'] < 2000.0)
    return 'WSF' if is_single_family else 'WMUH'
def format_duration(s):
    """Format a recording duration for the iTunes RSS format.

    Converts text like '1 hour, 2 minutes, 3 seconds' to 'HH:MM:SS'.
    """
    totals = {'hour': 0, 'minute': 0, 'second': 0}
    for part in s.split(", "):
        for unit in ('hour', 'minute', 'second'):
            if unit in part:
                totals[unit] += int(part.split(" ")[0].strip())
                break
    return "%02d:%02d:%02d" % (totals['hour'], totals['minute'], totals['second'])
def extract_name(fullname):
    """Split fullname[0] into (first_name, last_name) for the API call.

    1 word -> (word, ''); 2 -> (first, last); 3 -> (first two, last);
    4+ -> (first two, next two).  Note: handling these cases here could
    have been avoided by a better-structured web-scraper CSV.
    """
    parts = [p.strip() for p in fullname[0].strip().split(' ')]
    if len(parts) == 1:
        return parts[0], ""
    if len(parts) == 2:
        return parts[0], parts[1]
    if len(parts) == 3:
        return parts[0] + " " + parts[1], parts[2]
    return parts[0] + " " + parts[1], parts[2] + " " + parts[3]
def vigenere(abc, text, secret, decrypt=False):
    """Simple Vigenere encoder/decoder over the dictionary `abc`.

    Characters from `ignore` are passed through unchanged; every other
    character is shifted by the corresponding secret character.
    NOTE(review): reconstructed from flattened single-line source — the key
    index `ps` is taken to advance on every character, including ignored
    punctuation; confirm against the original layout.
    """
    ignore = ' ,.!?-+='
    abcl = len(abc)
    result = ''
    ti = []  # text converted to alphabet indices (punctuation kept as-is)
    si = []  # secret converted to alphabet indices
    for t in text:
        if t in ignore:
            ti.append(t)  # Do not encode punctuation marks
        else:
            ti.append(abc.index(t))
    for s in secret:
        si.append(abc.index(s))
    ps = 0  # current position in the secret key
    for pt in range(len(text)):
        if ps == len(si):
            ps = 0  # wrap the key around
        if str(ti[pt]) in ignore:
            result += ti[pt]  # Pass punctuation mark transparently
        else:
            if decrypt:
                result += abc[(ti[pt] - si[ps] + abcl) % abcl]
            else:
                result += abc[(ti[pt] + si[ps]) % abcl]
        ps += 1
    return result
def getDiceSetSim(concepts_1: set, concepts_2: set):
    """Dice set similarity: 2|A ∩ B| / (|A| + |B|)."""
    shared = len(concepts_1 & concepts_2)
    return 2 * shared / (len(concepts_1) + len(concepts_2))
def pks_from_iterable(iterable, unique_output=False):
    """Return a pks list based on an iterable.

    :param iterable: list of django model objects OR django queryset
        (plain ints / int-like values also work)
    :param unique_output: if True the returned list is deduplicated
    :return: list of int
    :raises TypeError: when an element is neither pk-bearing nor int-like
    """
    pks = []
    for obj in iterable:
        candidate = getattr(obj, 'pk', obj)
        try:
            pks.append(int(candidate))
        except (TypeError, ValueError):
            raise TypeError("Iterable %s is not any of Queryset, list with django Model objects or ints" % iterable)
    return list(set(pks)) if unique_output else pks
def uses_only(word, letters):
    """Return True if `word` contains only characters from `letters`.

    The previous implementation iterated over `letters` and tested
    membership in `word` — that is `uses_all` (word contains every
    letter), the inverse of the documented contract.
    """
    for ch in word:
        if ch not in letters:
            return False
    return True
def is_iter(obj):
    """Evaluate whether the object can be iterated.

    Arguments
    ---------
    obj: object

    Returns
    -------
    bool
    """
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def format_time_string(hours, minutes, seconds):
    """Format a time value into a string.

    Parameters
    ----------
    hours, minutes, seconds : float
        Time value, represented as hours, minutes, and seconds.

    Returns
    -------
    str
        A string representation of the time value.
    """
    return (f'{hours:1.2f} hours, {minutes:1.2f} minutes, '
            f'and {seconds:1.2f} seconds.')
def insert_ascii(reaction_id):
    """Encode punctuation in a reaction id as __<ascii-code>__ tokens so
    ids match the spreadsheet convention."""
    encoded = reaction_id
    for char in '-().+':
        encoded = encoded.replace(char, '__{}__'.format(ord(char)))
    return encoded
def get_token(catalog):
    """Extract the Keystone authentication token id from a service catalog."""
    access = catalog['access']
    return access['token']['id']
def combine_abstract_text(text1, text2):
    """Concatenate two abstract corpora into one string — markovify works
    best on a single huge string, so corpora are reduced by key here.

    Side effect: prints the first 20 characters of each input for tracing.
    """
    print(text1[:20], text2[:20])
    combined = text1 + text2
    return combined
def _posix_rjoin(a, b): """Join two pathname components, inserting '/' as needed. If the second component is an absolute path, the first one will be discarded. An empty last part will result in a path that ends with a separator.""" path = a if b.startswith('/'): path = b elif path == '' or path.endswith('/'): path += b else: path += '/' + b return path
def solution(A):
    """Compute the maximum product of any triplet within a list.

    Args:
        A (list): A collection of N integers.

    Returns:
        int: The maximum product derivable from A (0 when len(A) < 3).

    Complexity:
        Time: O(N * log(N))
        Space: O(N)
    """
    if len(A) < 3:
        return 0
    ordered = sorted(A)
    # Either the three largest, or the two most-negative times the largest.
    with_negatives = ordered[0] * ordered[1] * ordered[-1]
    all_positive = ordered[-1] * ordered[-2] * ordered[-3]
    return max(with_negatives, all_positive)
def angular_overlap_analytical(L_1, L_2, M_1, M_2, para):
    """Angular overlap `<l1, m| f(theta,phi) |l2, m>`.

    Can compute parallel (para=True) and perpendicular (para=False)
    interactions.  Taken from A. Morgan.

    Non-zero only for dL = L_2 - L_1 = +/-1 with dM = M_2 - M_1 in
    {-1, 0, +1}; dM == 0 contributes only in the parallel case.

    :param L_1, L_2: angular momentum quantum numbers (bra, ket)
    :param M_1, M_2: magnetic quantum numbers (bra, ket)
    :param para: True for the parallel matrix element, False for perpendicular
    :return: float overlap (0.0 when the selection rules are not met)
    """
    dL = L_2 - L_1
    dM = M_2 - M_1
    L, M = int(L_1), int(M_1)
    overlap = 0.0
    if para == True:
        if (dM == 0):
            if dL == +1:
                overlap = (+(((L+1)**2-M**2)/((2*L+3)*(2*L+1)))**0.5)
            elif dL == -1:
                overlap = (+((L**2-M**2)/((2*L+1)*(2*L-1)))**0.5)
        elif (dM == +1):
            if dL == +1:
                overlap = (-((L+M+2)*(L+M+1)/(2*(2*L+3)*(2*L+1)))**0.5)
            elif dL == -1:
                overlap = (+((L-M)*(L-M-1)/(2*(2*L+1)*(2*L-1)))**0.5)
        elif (dM == -1):
            if dL == +1:
                overlap = (+((L-M+2)*(L-M+1)/(2*(2*L+3)*(2*L+1)))**0.5)
            elif dL == -1:
                overlap = (-((L+M)*(L+M-1)/(2*(2*L+1)*(2*L-1)))**0.5)
    if para == False:
        if dM == +1:
            if dL == +1:
                overlap = (+(0.5*(-1)**(M-2*L)) * (((L+M+1)*(L+M+2))/((2*L+1)*(2*L+3)))**0.5)
            elif dL == -1:
                overlap = (-(0.5*(-1)**(-M+2*L)) * (((L-M-1)*(L-M)) /((2*L-1)*(2*L+1)))**0.5)
        elif dM == -1:
            if dL == +1:
                overlap = (+(0.5*(-1)**(M-2*L)) * (((L-M+1)*(L-M+2))/((2*L+1)*(2*L+3)))**0.5)
            elif dL == -1:
                overlap = (-(0.5*(-1)**(-M+2*L)) * (((L+M-1)*(L+M)) /((2*L-1)*(2*L+1)))**0.5)
    return overlap
def GetScrollInputLatencyEvents(scroll_type, browser_process, timeline_range):
    """Get scroll events' LatencyInfo from the browser process's trace
    buffer that fall inside timeline_range.

    Scroll events (MouseWheel, GestureScrollUpdate or JS scroll on
    TouchMove) dump their LatencyInfo into the trace buffer as async
    "InputLatency" events; each sub-slice carries a 'step' (event type)
    and a 'data' (latency history) member.
    """
    scroll_events = []
    if not browser_process:
        return scroll_events
    for event in browser_process.IterAllAsyncSlicesOfName("InputLatency"):
        if event.start < timeline_range.min or event.end > timeline_range.max:
            continue
        for sub_slice in event.sub_slices:
            args = sub_slice.args
            if 'step' not in args or 'data' not in args:
                continue
            if args['step'] == scroll_type:
                scroll_events.append(sub_slice)
    return scroll_events
def get_objname_from_tuple(obj_name_tuple):
    """Render an (O, C, I) tuple as its 'O&C&I' full-name string
    (e.g. 0&0&DEFINING_ORIGIN); I must already be a string."""
    O, C, I = obj_name_tuple
    return '&'.join((str(O), str(C), I))
def RestrictDictValues(aDict, restrictSet):
    """Return a dict keeping only the original mappings whose value is in
    the given set."""
    return {key: value for key, value in aDict.items() if value in restrictSet}
def remove_english_alphabets(text: str):
    """Remove English letters and ASCII digits from a text.

    Args:
        text (str): Urdu text

    Returns:
        str: text with English alphabets and digits removed
    """
    drop = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890"
    return text.translate(str.maketrans('', '', drop))
def rho_correction(goals_home, goals_away, home_exp, away_exp, rho):
    """Dixon and Coles low-score correction factor.

    Only the four scorelines (0,0), (0,1), (1,0) and (1,1) are adjusted;
    every other result gets a factor of 1.0.
    """
    score = (goals_home, goals_away)
    if score == (0, 0):
        return 1 - (home_exp * away_exp * rho)
    if score == (0, 1):
        return 1 + (home_exp * rho)
    if score == (1, 0):
        return 1 + (away_exp * rho)
    if score == (1, 1):
        return 1 - rho
    return 1.0
def format_usage(usage, rate):
    """Format resource usage: one decimal place below 1, integer otherwise."""
    scaled = float(usage) / rate
    if scaled < 1:
        return round(scaled, 1)
    return int(round(scaled, 0))
def getPermutations(level, valsList):
    """Cartesian product of valsList[level:], each combination as a list.

    In : getPermutations(0, [[1,2],[3,4]])
    Out: [[1, 3], [1, 4], [2, 3], [2, 4]]

    Returns [] when level is past the end of valsList.  Cleaned of
    C-style semicolons, commented-out debugger calls and manual
    accumulation; behavior is unchanged.
    """
    if level >= len(valsList):
        return []
    suffixes = getPermutations(level + 1, valsList)
    if not suffixes:
        # Last level: each value forms its own single-element combination.
        return [[v] for v in valsList[level]]
    return [[v] + suffix for v in valsList[level] for suffix in suffixes]
def bbox_structure_to_square(bbox):
    """Convert a COCO bbox to square form.

    [x, y, width, height] -> [min_height, min_width, max_height, max_width]
    """
    x, y, width, height = bbox
    return [y, x, y + height, x + width]
def format_todo_entry(commit_id, issue, isodate, message, files=None, sizes=None):
    """Render one todo entry line as bytes; newlines in the message become
    '---', and an optional ':file[,size]:...:' field lists touched files.

    >>> format_todo_entry(b'123456', b'TEST-123', b'2018-03-12', b'initial version which can \\nfind issues in actual commits TEST-123\\n')
    b'123456 TEST-123 2018-03-12 initial version which can ---find issues in actual commits TEST-123---'
    >>> format_todo_entry(b'123456', b'TEST-123', b'2018-03-12', b'initial version which can \\nfind issues in actual commits TEST-123\\n', [b'retrieve_commits_and_issues.py', b'MOO'])
    b'123456 TEST-123 2018-03-12 :retrieve_commits_and_issues.py:MOO: initial version which can ---find issues in actual commits TEST-123---'
    >>> format_todo_entry(b'123456', b'TEST-123', b'2018-03-12', b'initial version which can \\nfind issues in actual commits TEST-123\\n', [b'retrieve_commits_and_issues.py', b'MOO'], [3769, 423])
    b'123456 TEST-123 2018-03-12 :retrieve_commits_and_issues.py,3769:MOO,423: initial version which can ---find issues in actual commits TEST-123---'
    """
    flat_message = message.replace(b'\n', b'---')
    fields = [commit_id, issue, isodate]
    if files is not None:
        if sizes is not None:
            pairs = (b",".join((name, str(size).encode()))
                     for name, size in zip(files, sizes))
            fields.append(b":" + b":".join(pairs) + b":")
        else:
            fields.append(b":" + b":".join(files) + b":")
    fields.append(flat_message)
    return b' '.join(fields)
def _preprocess(state, preprocessors): """ Method to apply state preprocessors. Args: state (np.ndarray): the state to be preprocessed. Returns: The preprocessed state. """ for p in preprocessors: state = p(state) return state
def positive_int_from_str(string, base=0):
    """Parse `string` as a non-negative integer, else raise ValueError.

    :param string: text to parse
    :param base: numeric base passed to int(); 0 auto-detects prefixes
    :raises ValueError: when parsing fails or the value is negative

    The bare ``except:`` is narrowed to the exceptions int() actually
    raises for bad input — the old form also swallowed KeyboardInterrupt
    and SystemExit.
    """
    try:
        value = int(string, base=base)
    except (TypeError, ValueError):
        raise ValueError
    if value < 0:
        raise ValueError
    return value
def replace_keys(item, keymap):
    """Make a copy of a (tag, key, children, ...) structure with item[1]
    remapped through keymap; children in item[2] are converted
    recursively.  Any extra trailing elements are preserved."""
    parts = list(item)  # make mutable
    parts[1] = keymap[int(parts[1])]
    parts[2] = [replace_keys(child, keymap) for child in parts[2]]
    return tuple(parts)
def format_response(**kwargs):
    """Build a standard response dict from keyword arguments.

    :param kwargs: must contain status, code, message, action and files_ids
    :return: response dictionary with exactly those five fields
    :rtype: `dict`
    """
    fields = ("status", "code", "message", "action", "files_ids")
    return {field: kwargs[field] for field in fields}
def count_saccades(saccades):
    """Count the number of distinct saccades (runs of consecutive 1s).

    :param saccades: a list of 0/1 flags where 1 means the move from the
        previous sample is part of a saccade.
    :return: the number of distinct saccades
    """
    saccade_count = 0
    in_saccade = False
    for value in saccades:
        # A 1 after a non-saccade sample starts a new saccade.
        if value == 1 and not in_saccade:
            saccade_count += 1
            in_saccade = True
        elif value == 0 and in_saccade:
            in_saccade = False
    return saccade_count
def binStringToInt(binString):
    """Convert a string of binary digits into its base-10 integer value."""
    return int(binString, base=2)
def get_action_name_from_action(action):
    """Return the lowercased action name from a ``service:action`` string.

    :param action: e.g. ``ec2:DescribeInstance``
    :return: e.g. ``describeinstance``
    """
    # Unpacking keeps the original strictness: anything other than exactly
    # one ':' raises ValueError.
    _service, action_name = action.split(':')
    return action_name.lower()
def GetLeadSpaces(title):
    """Return the leading whitespace of the string."""
    # lstrip() removes exactly the characters for which isspace() is true,
    # so the length difference gives the leading-whitespace span.
    stripped = title.lstrip()
    return title[:len(title) - len(stripped)]
def standard_param_name(name):
    """Normalize a parameter name to namespace format.

    Strips leading slashes, then leading dashes, and converts the remaining
    dashes to underscores.
    """
    trimmed = name.lstrip("/").lstrip("-")
    return trimmed.replace("-", "_")
def _validate_prefix(prefix, avoid): """ Validate an install prefix. """ if prefix.startswith(avoid): rest = prefix[0:len(avoid)] return rest != "/" and rest != "" return True
def inertia_update(iteration, n_iterations, wmin, wmax):
    """Time varying acceleration inertia: w^k = wmax - (wmax - wmin)/kmax * k

    Parameters:
        iteration: int
            The number of the iteration
        n_iterations: int
            The number of total iterations
        wmin: float
            The minimum value of the inertia weight
        wmax: float
            The maximum value of the inertia weight

    Returns:
        float
            The new inertia weight value
    """
    decay_per_step = (wmax - wmin) / n_iterations
    return wmax - decay_per_step * iteration
def deep_hasattr(obj, pathname):
    """Return True if the attribute indicated by the dotted pathname exists
    on obj, False otherwise.

    Any failure while walking the intermediate attributes (including a
    non-string pathname) yields False rather than raising.
    """
    target = obj
    try:
        *intermediate, last = pathname.split('.')
        for attr in intermediate:
            target = getattr(target, attr)
    except Exception:
        return False
    return hasattr(target, last)
def index_containing_substring(list_str, pattern):
    """For a given list of strings, find the indices of the elements that
    contain the substring.

    Parameters
    ----------
    list_str: list of str
    pattern: str
        pattern

    Returns
    -------
    indices: list of int
        the indices where the pattern matches
    """
    return [idx for idx, text in enumerate(list_str) if pattern in text]
def readfile(path, logger):
    """Return the stripped binary contents of *path*, or None on errors.

    A falsy path, or any IOError while opening/reading, results in None;
    read failures are reported through *logger* when one is provided.
    """
    if not path:
        return None
    try:
        with open(path, mode="rb") as handle:
            contents = handle.read()
    except IOError as err:
        if logger:
            logger.error("Failed to read file '%s': %s", path, err)
        return None
    return contents.strip()
def _rightmost_descendants(node): """ Returns the set of all nodes descended in some way through right branches from this node. """ try: rightmost_leaf = max(node.treepositions()) except AttributeError: return [] return [node[rightmost_leaf[:i]] for i in range(1, len(rightmost_leaf) + 1)]
def per_mode_small_48(x):
    """Map a numeric code to its String API PerMode code.

    Input values: 1 -> "Totals", 2 -> "PerGame", 3 -> "Per48"

    :param x: numeric code between 1 and 3
    :return: the API string
    :raises ValueError: if x is not a valid code
    """
    measure = {1: "Totals", 2: "PerGame", 3: "Per48"}
    try:
        return measure[x]
    # Narrowed from a bare `except:`; TypeError covers unhashable inputs.
    except (KeyError, TypeError):
        raise ValueError("Please enter a number between 1 and " + str(len(measure)))
def decrement_items(inventory, items):
    """Decrement items in inventory using elements from the `items` list.

    Counts never drop below zero; items not present in the inventory are
    ignored. The inventory dict is modified in place and returned.

    :param inventory: dict - inventory dictionary.
    :param items: list - list of items to decrement from the inventory.
    :return: dict - updated inventory with items decremented.
    """
    for name in items:
        if name not in inventory:
            continue
        inventory[name] = max(inventory[name] - 1, 0)
    return inventory
def extract_p(path, dict_obj, default):
    """Extract a nested dict value following a dot-separated key path.

    :param path: the nested dict key path, separated by '.' (therefore no
        dots in key names allowed)
    :param dict_obj: the dictionary object from which to extract
    :param default: value returned when the path cannot be resolved
    :return: extracted value, or default
    """
    if dict_obj is None:
        return default
    current = dict_obj
    for key in path.split('.'):
        # BUG FIX: the original called .get() on whatever the previous level
        # produced, raising AttributeError when an intermediate value (or the
        # substituted default) was not a dict. Bail out with the default
        # instead of raising.
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current
def get_index_of_char(my_string, char):
    """Return all indices of all appearances of char in my_string.

    :param my_string: string to scan
    :param char: character to look for
    :return: list of matching indices
    """
    return [idx for idx, current in enumerate(my_string) if current == char]
def _resolve_path(a: tuple, b: tuple) -> str: """ Resolve the path from 'a' to 'b' and give a direction on it """ if a[0] == b[0] and a[1] + 1 == b[1]: return 'h' if a[0] + 1 == b[0] and b[1] == a[1]: return 'w' if a[0] + 1 == b[0] and a[1] + 1 == b[1]: return 'b' raise Exception("Unexpected path")
def is_number(n):
    """Check whether a string represents an integer.

    :param n: value to convert
    :return: the integer value if conversion succeeds, otherwise False.
        NOTE: a legitimate result of 0 is also falsy, so callers must
        compare against False explicitly rather than testing truthiness.
    """
    try:
        return int(n)
    # Narrowed from `except Exception` (with an unused variable) to the
    # exceptions int() actually raises on bad input.
    except (ValueError, TypeError):
        return False
def compare(s1, s2, strict=False):
    """version.compare compares two version strings (format X.Y.Z)

    if s1 < s2, returns -1
    if s1 == s2, returns 0
    if s1 > s2, returns 1

    if strict=False (default) 2.1 == 2
    if strict=True 2 < 2.1 (the shorter version is considered smaller
    when all common parts are equal)

    Non-numeric components fall back to lexicographic comparison.
    """
    s1v = s1.split('.')
    s2v = s2.split('.')
    # Compare the version parts the two strings have in common.
    for (v1, v2) in zip(s1v, s2v):
        if v1 == v2:
            continue
        try:
            iv1 = int(v1)
            iv2 = int(v2)
        # Narrowed from a bare `except:` — only conversion failures mean
        # "not a number"; anything else should propagate.
        except ValueError:
            # This part of the string version is not a number; it is not
            # clear what to do, so compare the strings lexicographically.
            return -1 if v1 < v2 else 1
        return -1 if iv1 < iv2 else 1
    # All compared parts were equal.
    if not strict:
        return 0
    # They may still have different sizes: the shortest is considered
    # smaller when strict=True.
    if len(s1v) < len(s2v):
        return -1
    elif len(s2v) < len(s1v):
        return 1
    return 0
def binary_search_lookup(val, l):
    """Returns the index of where the val is in a sorted list.

    If val is not in l, then return the index of the element directly
    below l.

    Example:
    >>> binary_search_lookup(10.0, [-5.0, 6.0, 10.0, 100.0])
    2
    >>> binary_search_lookup(5.0, [-5.0, 4.0, 10.0, 100.0])
    1
    >>> binary_search_lookup(11.0, [-5.0, 4.0, 10.0, 100.0])
    2
    """
    high = len(l) - 1
    low = 0
    mid = (high + low) // 2
    while abs(high - low) > 1:
        if l[mid] == val:
            return mid
        if val < l[mid]:
            high = mid  # search the lower half
        else:
            low = mid   # search the upper half
        mid = (high + low) // 2
    # No exact match found: return the lower bound.
    return low