content
stringlengths
42
6.51k
def clamp(minValue, maxValue, value):
    """Clip *value* into the range [minValue, maxValue]."""
    return minValue if value < minValue else (maxValue if value > maxValue else value)
def json_update(root, new_root):
    """Recursively merge *new_root* into *root*.

    Dicts are merged key-by-key (in place); any non-dict value in
    new_root simply replaces the corresponding value. Returns the
    merged document.
    """
    if not (isinstance(root, dict) and isinstance(new_root, dict)):
        return new_root
    for key, new_value in new_root.items():
        root[key] = json_update(root.get(key), new_value)
    return root
def descending(x):
    """Return the integers of *x* as a new list in descending order.

    Input:  x - a list of integers, e.g. [2,5,6,0,1,3]
    Output: a list of integers in descending order, e.g. [6,5,3,2,1,0]

    Fixes two defects of the previous recursive version: it ran in
    O(n^2) (max + remove per element) and it destructively emptied the
    caller's list as a side effect.
    """
    return sorted(x, reverse=True)
def evaluate_dnf(formula, true_props):
    """
    Evaluate *formula* (DNF syntax with |, &, !) assuming *true_props*
    are the only true propositions and everything else is false.
    e.g. evaluate_dnf("a&b|!c&d", "d") returns True
    """
    # Disjunction: true when any clause is true.
    if "|" in formula:
        return any(evaluate_dnf(clause, true_props) for clause in formula.split("|"))
    # Conjunction: true when every literal is true.
    if "&" in formula:
        return all(evaluate_dnf(literal, true_props) for literal in formula.split("&"))
    # Negation.
    if formula.startswith("!"):
        return not evaluate_dnf(formula[1:], true_props)
    # Constants and plain propositions.
    if formula == "True":
        return True
    if formula == "False":
        return False
    return formula in true_props
def matrix_to_string(
    matrix, row_headers=None, col_headers=None, fmtfun=lambda x: str(int(x))
):
    """Render a 2D matrix (nested lists) as a tab-separated string.

    Parameters
    ----------
    matrix : nested list of values
    row_headers : optional list of per-row labels (Default value = None)
    col_headers : optional list of column labels (Default value = None)
    fmtfun : cell formatter (Default value = lambda x: str(int(x)))

    Returns
    -------
    str : one line per row, cells separated by tabs
    """
    def render(row):
        return "\t".join(fmtfun(cell) for cell in row)

    lines = []
    if col_headers:
        # Leading tab aligns column headers past the row-header column.
        prefix = "\t" if row_headers else ""
        lines.append(prefix + "\t".join(col_headers))
    if row_headers:
        for header, row in zip(row_headers, matrix):
            lines.append(header + "\t" + render(row))
    else:
        for row in matrix:
            lines.append(render(row))
    return "\n".join(lines)
def sorted_equality(v1, v2, read):
    """Order-insensitive equality: compare the sorted forms of v1 and v2.

    The *read* argument is unused but kept for API compatibility.
    """
    left, right = sorted(v1), sorted(v2)
    return left == right
def resolve_propertylist(propertylist):
    """Normalize the property list received from click options.

    Click produces an empty list when no property list was given, while
    pywbem requires None in that case. Entries may also contain comma
    separated property names, which are split apart here.

    Parameters: list of strings or None; each item is one property name
    or several separated by commas.

    Returns: None (no list given), [] (single empty string), or the list
    of individual property names.
    """
    # No property list at all means "all properties" -> None.
    if not propertylist:
        return None
    # A single empty string means an explicitly empty list.
    if len(propertylist) == 1 and not propertylist[0]:
        return []
    # str.split(',') on a comma-free entry yields [entry], so a single
    # comprehension covers both the split and the pass-through cases.
    return [name for entry in propertylist for name in entry.split(',')]
def make_great(names):
    """Return a new list with 'the Great ' prepended to every name."""
    greats = []
    for name in names:
        greats.append('the Great ' + name)
    return greats
def arrayRankTransform(arr):
    """
    Replace each element with its dense 1-based rank (smallest value -> 1,
    equal values share a rank).

    Replaces the previous index-juggling implementation, which carried an
    unreachable `else` branch and manual rank counters, with a direct
    dense-rank lookup built from the sorted distinct values.

    :type arr: List[int]
    :rtype: List[int]
    """
    rank = {value: i + 1 for i, value in enumerate(sorted(set(arr)))}
    return [rank[value] for value in arr]
def half_number(number):
    """Return number halved (floor) plus one, for use as a range bound."""
    return number // 2 + 1
def split_dir_to_str(split_dir: str) -> str:
    """User-facing name for a split dir; '-' denotes the monorepo root."""
    if split_dir == '-':
        return 'monorepo root'
    return split_dir
def find_min(minimum, t2):
    """
    Return the smaller of the two arguments.

    ans = find_min(10, 9) = 9
    ans = find_min(1, 7) = 1
    ans = find_min(3, 3) = 3
    """
    return min(minimum, t2)
def get_noncentral_m2(mean: float, cv: float) -> float:
    """Compute the non-central second moment from the mean and the
    coefficient of variation."""
    variance = (mean * cv) ** 2
    return variance + mean ** 2
def split_loc_techs_transmission(transmission_string):
    """
    Split a 'loc::tech:link' string into
    {loc_from: loc, loc_to: link, tech: tech}.
    """
    loc, tech_link = transmission_string.split('::')
    tech, link = tech_link.split(':')
    keys = ('loc_from', 'loc_to', 'tech')
    return dict(zip(keys, (loc, link, tech)))
def linear_function(B, x):
    """
    Linear function for use with orthogonal distance regression.

    Usage: linear = scipy.odr.Model(qcutils.linear_function)
           where B is a list of slope and offset values
                 x is an array of x values
    """
    slope, offset = B[0], B[1]
    return slope * x + offset
def guidance_UV(index):
    """Return Met Office guidance regarding UV exposure based on UV index.

    Returns None for an index of 0 or below (or otherwise out of band).
    """
    # (exclusive lower bound, exclusive upper bound, guidance text)
    bands = (
        (0, 3, "Low exposure. No protection required. You can safely stay outside"),
        (2, 6, "Moderate exposure. Seek shade during midday hours, cover up and wear sunscreen"),
        (5, 8, "High exposure. Seek shade during midday hours, cover up and wear sunscreen"),
        (7, 11, "Very high. Avoid being outside during midday hours. Shirt, sunscreen and hat are essential"),
    )
    for low, high, text in bands:
        if low < index < high:
            return text
    if index > 10:
        return "Extreme. Avoid being outside during midday hours. Shirt, sunscreen and hat essential."
    return None
def setifset(idict, ikey):
    """Return idict[ikey] if the key exists, otherwise False.

    (The docstring previously claimed None was returned for a missing
    key, but the code has always returned False; the documentation now
    matches the behavior, and dict.get replaces the membership test.)
    """
    return idict.get(ikey, False)
def check_autotune_params(iterations):
    """
    Validate the autotune iteration count.

    Raises ValueError when *iterations* is not an int or is negative;
    otherwise returns a confirmation message.
    """
    if not isinstance(iterations, int):
        raise ValueError("Integer expected.")
    if iterations < 0:
        raise ValueError("Integer >= 0 expected.")
    return f"Autotuning parameters: iterations = {iterations}."
def convert_float(val):
    """
    Convert a string currency value to a float:
    - Remove $
    - Remove commas
    - Convert to float type

    The stripping step had been commented out, so values such as
    "$1,234" raised ValueError despite the documented contract.
    """
    return float(val.replace(',', '').replace('$', ''))
def create_word_dict(word):
    """Build a character-frequency dictionary for *word*."""
    counts = {}
    for char in word:
        counts[char] = counts.get(char, 0) + 1
    return counts
def calculate_distance_between_colors(color1, color2):
    """Return the channel-wise average of two RGB color tuples."""
    red = (color1[0] + color2[0]) / 2
    green = (color1[1] + color2[1]) / 2
    blue = (color1[2] + color2[2]) / 2
    return (red, green, blue)
def cast_ulonglong(value):
    """Truncate *value* to an unsigned 64-bit integer."""
    return value & ((1 << 64) - 1)
def get_item_hrefs(result_collection):
    """
    Given a result_collection (returned by a previous API call that
    returns a collection, like get_bundle_list() or search()), return a
    list of item hrefs.

    'result_collection' a JSON object returned by a previous API call.

    Returns a list, which may be empty if no items were found.
    """
    # Argument error checking.
    assert result_collection is not None
    links = result_collection.get('_links')
    if links is None:
        return []
    items = links.get('items')
    if items is None:
        return []
    return [item.get('href') for item in items]
def color_cell_latex(val):
    """
    Prefix a LaTeX table-cell value with the command that colors the
    cell yellow.

    :param val: string or float — content of the latex table cell.
    :return: string — the \\cellcolor command followed by the cell content.
    """
    # Raw string fixes the invalid "\c" escape sequence warning of the
    # previous non-raw literal (output is unchanged).
    return r"\cellcolor{yellow!50} " + str(val)
def mapToRange(val, src, dst):
    """Linearly remap *val* from interval src=(lo, hi) to dst=(lo, hi)."""
    fraction = (val - src[0]) / (src[1] - src[0])
    return fraction * (dst[1] - dst[0]) + dst[0]
def remove_none_recursively(obj):
    """Recursively drop None values from nested containers.

    Used to support comparing a provided config against the config
    retrieved from kubernetes, which returns every field even when it
    holds no configured value. Container types are preserved.
    """
    if isinstance(obj, dict):
        return type(obj)(
            (remove_none_recursively(k), remove_none_recursively(v))
            for k, v in obj.items()
            if k is not None and v is not None
        )
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(remove_none_recursively(x) for x in obj if x is not None)
    return obj
def largest_factor(n):
    """Return the largest factor of n that is smaller than n.

    >>> largest_factor(15) # factors are 1, 3, 5
    5
    >>> largest_factor(80) # factors are 1, 2, 4, 5, 8, 10, 16, 20, 40
    40
    >>> largest_factor(13) # factor is 1 since 13 is prime
    1
    """
    # Scan downward: the first divisor found is the largest proper factor.
    for candidate in range(n - 1, 0, -1):
        if n % candidate == 0:
            return candidate
    return 1
def from_set(X: set) -> int:
    """Convert a set of factors into an integer.

    Returns the product of the numbers in *X* (1 for an empty set),
    delegating to math.prod rather than a hand-rolled loop.
    """
    import math
    return math.prod(X)
def accepts_lines(block_type):
    """Return True when the block type can accept lines of text."""
    return block_type in ('Paragraph', 'IndentedCode', 'FencedCode')
def moves_to_synchronous_moves(moves, loads):
    """
    translates the list of moves returned from the traffic jam solver to a
    list of moves that can be made concurrently.

    :param moves: list of loads and moves, e.g. [{1: (2,3)}, {2:(1,2)}, ... ]
    :param loads: dict with loads and paths, e.g. {1: [2,3,4], 2: [1,2,3], ... }
    :return: list of synchronous loads and moves, e.g. [{1:(2,3), 2:(1,2)}, {1:(3,4), 2:(2,3)}, ...]
    """
    # Flatten [{load: (n1, n2)}, ...] into [(load, n1, n2), ...]; this also
    # creates an independent copy so the caller's move list is not consumed.
    moves = [(k,) + v for move in moves for k, v in move.items()]  # create independent copy
    assert isinstance(loads, dict)
    assert all(isinstance(t, (list, tuple)) for t in loads.values())
    # Start location of every load; loads are required in case that a load doesn't move.
    occupied_locations = {L[0] for L in loads.values()}
    synchronuous_moves = []
    # Each pass over `moves` builds one synchronous step: a set of moves whose
    # destinations are all free at the start of the step.
    while moves:
        current_moves = {}
        for move in moves[:]:  # iterate a copy, since entries are removed below
            load, n1, n2 = move
            if load in current_moves:
                # The same load may move only once per synchronous step; its
                # later moves depend on this one, so stop scanning here.
                break
            if n2 in occupied_locations:
                continue  # destination still blocked; retry in a later step
            current_moves[load] = (n1, n2)
            occupied_locations.remove(n1)
            occupied_locations.add(n2)
            moves.remove(move)
        synchronuous_moves.append(current_moves)
    return synchronuous_moves
def _transform_record (record): """Transform a record from a list of fields to a dict. Doesn't handle nested records. """ if isinstance(record, list): # value can only be missing if it was an empty sequence of tokens, # which should have become a list, which we should transform into a dict return {field['name']: field.get('value', {}) for field in record} else: return record
def sec2hms(seconds):
    """Convert a duration in seconds to [hours, minutes, seconds].

    The seconds component keeps any fractional part of the input.

    BUGFIX: the previous code treated the input as *hours*
    (hr = int(seconds); seconds*3600 ...), contradicting both the
    function name and its docstring.
    """
    hr = int(seconds // 3600)
    mn = int((seconds - hr * 3600) // 60)
    sec = seconds - hr * 3600 - mn * 60
    return [hr, mn, sec]
def GetBuildShortBaseName(target_platform):
    """Returns the build base directory.

    Args:
        target_platform: Target platform.
    Returns:
        Build base directory.
    Raises:
        RuntimeError: if target_platform is not supported.
    """
    platform_dict = {
        'Windows': 'out_win',
        'Mac': 'out_mac',
        'Linux': 'out_linux',
        'Android': 'out_android',
        'NaCl': 'out_nacl',
    }
    if target_platform not in platform_dict:
        # Typo fix in the error message: "Unkown" -> "Unknown".
        raise RuntimeError('Unknown target_platform: ' + (target_platform or 'None'))
    return platform_dict[target_platform]
def _code_block(text): """ Returns a code block string """ return f'```\n{text}\n```\n\n'
def dimensionsKeepAspect(targetWidth, targetHeight, oldWidth, oldHeight):
    """
    Resize dimensions that fit within (targetWidth, targetHeight) while
    preserving the original aspect ratio. Images already smaller than the
    target in both dimensions are not upsized.
    """
    # Small enough already: keep as-is.
    if oldWidth < targetWidth and oldHeight < targetHeight:
        return (int(oldWidth), int(oldHeight))
    oldAspect = oldWidth / float(oldHeight)
    newAspect = targetWidth / float(targetHeight)
    if oldAspect > newAspect:
        # Wider than the target box: width is the limiting dimension.
        return (int(targetWidth), int(targetWidth / oldAspect))
    if oldAspect < newAspect:
        # Taller than the target box: height is the limiting dimension.
        return (int(targetHeight * oldAspect), int(targetHeight))
    if oldAspect == newAspect:
        return (int(targetWidth), int(targetHeight))
def nearlyEqual(x, y):
    """Return True when x and y differ by less than the built-in 1e-4 tolerance."""
    return abs(x - y) < 1e-4
def get_work_count(work):
    """
    Get total work to do, i.e. the sum of work items over every entry.

    @type work: dict
    @param work: Dictionary of work to do keyed on entry name
    @rtype: int
    @return: Total work to do.
    """
    return sum(len(items) for items in work.values())
def tuples_as_dict(tuples):
    """Build a dict from the 2-tuples in *tuples*; other sizes are skipped.

    Later duplicates of a key overwrite earlier ones.
    """
    return dict(item for item in tuples if len(item) == 2)
def illegal_input(row):
    """
    Validate a user-entered row: letters at even indices, single spaces at
    odd indices, and at most 7 characters. Prints 'Illegal input' and
    returns False on the first violation.

    :param row: str, user input
    :return: boolean — True when the row is well-formed
    """
    if len(row) > 7:
        print('Illegal input')
        return False
    for i in range(len(row)):
        if i % 2 == 1:
            # Odd positions must be the separating spaces.
            if row[i] != ' ':
                print('Illegal input')
                return False
        # Even positions must be letters.
        # BUGFIX: the original tested `row[i].isalpha is False`, which
        # compares the bound method object itself and never fired; the
        # method has to be called.
        elif not row[i].isalpha():
            print('Illegal input')
            return False
    return True
def ps_convert_to_oneliner(psscript):
    """ Converts a PowerShell script to a one-liner.

    Backtick-escapes the double-quoted literals the script is known to
    contain, collapses here-string delimiters (@" / "@) to plain quotes,
    and removes newlines so the script fits on a single command line.
    """
    # Escape embedded double quotes with PowerShell backticks.
    psscript = psscript.replace('"kernel32"', '`"kernel32`"')
    psscript = psscript.replace('"Kernel32.dll"', '`"Kernel32.dll`"')
    psscript = psscript.replace('"RtlMoveMemory"', '`"RtlMoveMemory`"')
    psscript = psscript.replace('"amsi.dll"', '`"amsi.dll`"')
    psscript = psscript.replace('"Amsi"', '`"Amsi`"')
    psscript = psscript.replace('"Scan"', '`"Scan`"')
    psscript = psscript.replace('"Buffer"', '`"Buffer`"')
    # Collapse here-string delimiters to ordinary quotes.
    psscript = psscript.replace('@"', '"')
    psscript = psscript.replace('"@', '"')
    # Join all lines into one.
    psscript = psscript.replace("\n", "")
    # NOTE(review): this strips space characters outright; confirm the intent
    # is removing indentation rather than collapsing meaningful spaces.
    psscript = psscript.replace(" ", "")
    return psscript
def part2(data):
    """Count passphrases with no two words that are anagrams of each other.

    >>> part2([['abcde','fghij']])
    1
    >>> part2([['abcde','xyz','ecdab']])
    0
    """
    valid = 0
    for words in data:
        # Sorting each word's letters canonicalizes anagrams.
        normalized = [''.join(sorted(word)) for word in words]
        if len(set(normalized)) == len(normalized):
            valid += 1
    return valid
def get_id(object_name, __entries={}):
    """Look up and cache new unique IDs for the tables and connections.

    NOTE: the mutable default argument is intentional — `__entries`
    persists across calls and acts as the cache mapping object names
    to their assigned IDs. Do not "fix" it to None.
    """
    if object_name not in __entries:
        # The next ID is one past the largest handed out so far (1 for the first).
        maxval = 0 if not __entries else max(__entries.values())
        __entries[object_name] = 1 + maxval
    return __entries[object_name]
def html5_collapsible(summary, details) -> str:
    """Return a nestable, collapsible <details> tag for check grouping and
    sub-results."""
    return "<details><summary>{}</summary><div>{}</div></details>".format(summary, details)
def hamming(n):
    """Returns the nth hamming number (1-indexed: hamming(1) == 1).

    Classic three-pointer construction: i, j and k track the smallest
    elements whose multiples by 2, 3 and 5 have not yet been emitted.
    """
    hamming = [1]
    i = j = k = 0
    while n:
        # Advance each pointer past candidates already <= the current largest
        # element; this also skips duplicates such as 6 = 2*3 = 3*2.
        while hamming[i] * 2 <= hamming[-1]:
            i += 1
        while hamming[j] * 3 <= hamming[-1]:
            j += 1
        while hamming[k] * 5 <= hamming[-1]:
            k += 1
        hamming.append(min(hamming[i] * 2, hamming[j] * 3, hamming[k] * 5))
        n -= 1
    # After n appends the list holds n+1 values; the nth Hamming number is the
    # second-to-last element (the loop computes one value beyond it).
    return hamming[-2]
def mask(value, mask):
    """Apply a bitmask string to *value*: '1' forces the bit on, '0' forces
    it off, 'X' leaves it unchanged.

    >>> mask(11, "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
    73
    >>> mask(101, "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
    101
    >>> mask(0, "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
    64
    >>> mask(0, "1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    34359738368
    """
    result = value
    # The rightmost mask character is bit 0, hence the reversal.
    for position, bit_char in enumerate(reversed(mask)):
        if bit_char == '1':
            result |= 1 << position
        elif bit_char == '0':
            result &= ~(1 << position)
    return result
def xerror(x, y):
    """Return the percentage error of x relative to y, rounded to 2 decimals."""
    relative = abs((x - y) / y)
    return round(relative * 100, 2)
def _get_format(url): """Gives back the format of the file from url WARNING: It might break. Trying to find a better way. """ return url.split('.')[-1]
def cleanup_trailing_spaces(message):
    """Strip trailing whitespace from every line (Sublime Text removes them,
    so comparisons must too)."""
    return '\n'.join(line.rstrip() for line in message.split('\n'))
def LogFiles(log_prefix):
    """Determine the filenames for the instmake log and the make output log
    from an arbitrary prefix.

    Returns a tuple (instmake_log, make_log)."""
    instmake_log = log_prefix + ".imlog"
    make_log = log_prefix + ".make.out"
    return (instmake_log, make_log)
def fix(text):
    """Sanitize a value for a single CSV cell.

    Replaces characters that would break the csv file (double quotes,
    commas and newline characters) with underscores and surrounds the
    result with quote characters. If 'text' has no encode method (e.g.
    it is a number) it is just returned as a string (using str).

    Ported to Python 3: the old code kept the bytes returned by
    .encode() and then compared str characters against them, which
    raises TypeError on Python 3.
    """
    try:
        # Round-trip through UTF-8 (xmlcharrefreplace for unencodable
        # characters) and keep working with str, not bytes.
        clean = text.encode('UTF-8', 'xmlcharrefreplace').decode('UTF-8')
    except AttributeError:
        # Not string-like (e.g. int/float): just stringify.
        return str(text)
    # Replace bad characters with an underscore.
    for ch in ['"', ',', '\n']:
        if ch in clean:
            clean = clean.replace(ch, "_")
    # Surround strings with quotes.
    return '"' + clean + '"'
def NLCS(sentence1, sentence2): """ Determine the length of the NLCS of two sentences """ # Get each individual word from each of the sentences. word1 = sentence1.split() word2 = sentence2.split() # Get the number of words from each sentence. l1 = len(word1) l2 = len(word2) # Initialize the nested list to store all the # subsequence similarity values a = [] for i in range(l1 + 1): l = [] for j in range(l2 + 1): l.append([]) a.append(l) for i in range(l1 + 1): for j in range(l2 + 1): # Nothing to compare initially if i == 0 or j == 0: a[i][j] = 0 # Matching words # Add 1 to the subsequence elif word1[i - 1] == word2[j - 1]: a[i][j] = a[i - 1][j - 1] + 1 # Words do not match # Get the maximum value of its previous neighbours else: a[i][j] = max(a[i-1][j], a[i][j-1]) # a[l1][l2] contains the length of the # longest common subsequence of X[0..n-1] & Y[0..m-1] lf = a[l1][l2]/(len((set(word1).union(set(word2))))) # lf is the length of the Normalized longest common subsequence return lf
def transpose(matrix):
    """
    Return the transpose of a rectangular matrix (nested lists).

    Generalized from the previous hard-coded 4x4 implementation: any
    n x m matrix is supported. Runs in O(n*m).

    :param matrix: rectangular nested list
    :return new_matrix: transposed nested list
    """
    # zip(*matrix) pairs up the i-th element of every row, i.e. the columns.
    return [list(column) for column in zip(*matrix)]
def sortedSquaredArrayPointers(array):
    """
    ### Description
    Square every number of a *sorted* input array and return a new list
    of the squares in ascending order, using two pointers in O(n).

    ### Parameters
    - array: the sorted collection of numbers.

    ### Returns
    A new ascending list containing the squares of every element.
    """
    squared = [0 for _ in array]
    start = 0
    end = len(array) - 1
    idx = len(array) - 1
    # The largest square is always at one of the two ends of a sorted array.
    while start <= end:
        if abs(array[start]) < abs(array[end]):
            squared[idx] = array[end] ** 2
            end -= 1
        else:
            squared[idx] = array[start] ** 2
            start += 1
        # BUGFIX: was `idx -= 0`, which never advanced the write position
        # and left every slot except the last one as 0.
        idx -= 1
    return squared
def lRGB_ACEScg(lRGB):
    """Convert a linear-RGB triple to ACEScg.

    Generated using the XYZ Scaling on https://www.colour-science.org/apps/
    """
    R, G, B = lRGB
    acescg_r = R * 0.6050374899 + G * 0.3297772590 + B * 0.0652703903
    acescg_g = R * 0.0693938279 + G * 0.9192626515 + B * 0.0113133072
    acescg_b = R * 0.0207546370 + G * 0.1074133069 + B * 0.8717796985
    return [acescg_r, acescg_g, acescg_b]
def detect_blinks(eye_closure_list, fps):
    """ Returns instantaneous blink rates sampled roughly every two seconds.

    Despite the historical name/comment about frames, the returned list
    holds blink-rate values (total blinks / elapsed seconds so far), one
    entry per ~2-second window of the input.

    :param eye_closure_list: per-frame eye-closure percentage (None allowed
        for frames with no measurement)
    :param fps: frames per second of the source video
    """
    eye_cl_thresh = 50        # eye closure >= 50 to be considered closed
    eye_cl_consec_frames = 1  # 1 or more consecutive frames to be considered a blink
    counter = 0               # consecutive closed-eye frames seen so far
    # Array of sampled blink-rate values (see docstring).
    blink_timestamps = []
    # Instantaneous blink rate (blink rate after every 2 secs)
    # blink rate = total number of blinks / time (in minutes) = blinks/minute
    total_blinks = 0
    elapsed_seconds = 0
    two_sec_save = 0     # elapsed time at the last sample point
    two_sec_tracker = 0  # time since the last sample point
    for frame_number, eye_thresh in enumerate(eye_closure_list):
        if eye_thresh is None:
            # Missing measurement: neither extends nor ends a closure run.
            pass
        elif eye_thresh > eye_cl_thresh:
            counter += 1
        else:
            # Eye re-opened: a sufficiently long closure run counts as a blink.
            if counter >= eye_cl_consec_frames:
                total_blinks += 1
                # seconds = frame_number / fps
                # minutes = seconds / 60
                # if minutes < 1:
                #     minutes = 0
                # blink_timestamps.append((minutes, seconds))
            counter = 0
        # convert processed frames to number of seconds
        elapsed_seconds = ((frame_number+1) / fps)
        # tracker to see if two secs have passed since blink rate was last captured
        two_sec_tracker = elapsed_seconds - two_sec_save
        # Goal is to capture blink rate every two seconds
        if two_sec_tracker >= 2:
            two_sec_save += two_sec_tracker
            two_sec_tracker = 0
            blink_rate = total_blinks / elapsed_seconds  # in blinks per second
            blink_timestamps.append(blink_rate)
    return blink_timestamps
def split_tag(chunk_tag):
    """
    Split a chunk tag into its IOBES prefix and chunk type.
    e.g. 'B-PER' -> ('B', 'PER')
         'O'     -> ('O', None)

    Always returns a tuple; previously non-'O' tags returned a list,
    which contradicted the documented example and made the return type
    inconsistent.
    """
    if chunk_tag == 'O':
        return ('O', None)
    return tuple(chunk_tag.split('-', maxsplit=1))
def merge_str(str1: str, str2: str, separator: str = "\n") -> str:
    """Join two strings with *separator*; if either is empty the separator
    is omitted and the non-empty one (or '') is returned.

    Arguments:
        str1 {str} -- Joined on left
        str2 {str} -- Joined on right

    Keyword Arguments:
        separator {str} -- Middle string if both inputs are non-empty
                           (default: {'\n'})

    Returns:
        str -- The joined str
    """
    if not str1 or not str2:
        return str1 or str2
    return str1 + separator + str2
def add_man_header(text: str, name, section, title) -> str:
    """
    Prepend the Pandoc man-page header line to markdown text.

    :param text: Original text.
    :param name: Name for this manpage.
    :param section: Man pages section for this page.
    :param title: Title for this page.
    :return: Text with header added.
    """
    # % cifra(1) | cifra usage documentation
    header = "% {}({}) | {}".format(name, section, title)
    return header + "\n" + text
def pow4(x, alpha, a, b, c):
    """Evaluate c - (a*x + b)**-alpha.

    Parameters
    ----------
    x : int
    alpha : float
    a : float
    b : float
    c : float

    Returns
    -------
    float
    """
    base = a * x + b
    return c - base ** -alpha
def is_min_value(values):
    """Return True if the three most recent values describe a local minimum.

    A local minimum is defined as: A > B < C over the last three values.

    :param values: list of values (at least three)
    :returns: True or False
    """
    # Only the last three values matter.
    window = values[-3:]
    return window[0] > window[1] < window[2]
def merge_args_with_kwargs(args_dict, kwargs_dict):
    """Return a new dict of args_dict entries overridden by kwargs_dict."""
    merged = args_dict.copy()
    merged.update(kwargs_dict)
    return merged
def parse_bool_or_400(data, key, default=None):
    """Parse data[key] as a boolean.

    Missing key -> *default*; an existing bool passes through; any other
    value is treated as a string, true only for 'true'/'1' (case-insensitive).
    """
    try:
        raw = data[key]
    except KeyError:
        return default
    if isinstance(raw, bool):
        return raw
    return raw.lower() in ('true', '1')
def mock_choice_first_index(array):
    """
    this will be used to replace the default random.choice method. We need a
    test method that will act consistently.

    args:
        array: the candidate sequence

    returns:
        the smallest element (the first item of a sorted copy), which makes
        the "choice" deterministic regardless of the input's order
    """
    s = sorted(array)
    return s[0]
def remaining_vehicles(psvList):
    """
    Get the number of vehicles that are still searching (not parked).

    Args:
        psvList (list): List of parking search vehicle objects

    Returns:
        int: Number of remaining vehicles which are not parked
    """
    count = 0
    for psv in psvList:
        if not psv.is_parked():
            count += 1
    return count
def _or(*args):
    """Helper function to return its parameters or-ed together, ready for a
    SQL statement. eg, _or("x=1", "y=2") => "x=1 OR y=2"

    NOTE(review): the original docstring claimed the result was also
    bracketed, but no parentheses are added; callers must parenthesize
    themselves when precedence matters.
    """
    return " OR ".join(args)
def array_to_string(arr):
    """
    Convert an array of strings to a comma separated string.

    Replaces the previous quadratic build-then-trim concatenation loop
    with str.join; an empty input yields "".
    """
    return ", ".join(arr)
def always(start_time, end_time):
    """Return True when the interval means "always": both timestamps are the
    all-zero date ('0000-00-00') or the all-zero datetime
    ('0000-00-00 00:00:00')."""
    zero_pairs = (
        ('0000-00-00', '0000-00-00'),
        ('0000-00-00 00:00:00', '0000-00-00 00:00:00'),
    )
    return (start_time, end_time) in zero_pairs
def tira_espacos(str):
    """Remove every space from a character string.

    Input:  str - character string
    Output: the same string without any spaces

    Note: used by the digramas function and by the Chave type
    (helper gera_ordem).
    """
    return ''.join(ch for ch in str if ch != ' ')
def new_user(name, email):
    """Build the dict describing a new website user."""
    return dict(name=name, email=email)
def MIN_CMP(x, y):
    """Return the min-comparison result y - x (positive when x < y);
    0 when either argument is not an int/float."""
    numeric = (int, float)
    if isinstance(x, numeric) and isinstance(y, numeric):
        return y - x
    return 0
def compute_av_stim_frame(experiments_stim_frames):
    """
    Collect, across flies, the frames where each "on" stimulation starts:
    {stim_name: [frame_per_experiment, ...]} for every key containing "on".
    """
    av_stim_frame = {}
    for expe_stim_frame in experiments_stim_frames:
        for stim, frame in expe_stim_frame.items():
            if "on" in stim:
                av_stim_frame.setdefault(stim, []).append(frame)
    return av_stim_frame
def transform_basis_name(name):
    """
    Transform a basis-set name into the internal representation.

    Lower-cases the name and encodes '/' as '_sl_' and '*' as '_st_'
    so that basis set names compare consistently.
    """
    return name.lower().replace('/', '_sl_').replace('*', '_st_')
def flatten_instructions(recipeInstructions, field='text', has_title=False):
    """Extract the text of each recipe step.

    recipeInstructions is a list of step dicts; *field* names the text
    entry. With has_title=True each step's 'title' is prefixed as
    'title: text'."""
    if has_title:
        return [step['title'] + ': ' + step[field] for step in recipeInstructions]
    return [step[field] for step in recipeInstructions]
def bin_pack(items, bin_size, bins=None):
    """Pack *items* into bins of capacity *bin_size*.

    Exhaustively tries every placement and returns a packing using the
    fewest bins.

    :param items: list of item sizes still to place
    :param bin_size: capacity of each bin
    :param bins: partial packing built so far (list of lists of sizes)
    :return: list of bins (each a list of item sizes)
    """
    bins = [] if bins is None else bins
    if not items:
        return bins
    item = items[0]
    solutions = []
    for i, bin in enumerate(bins):
        if sum(bin) + item > bin_size:  # Can't add to bin
            continue
        # BUGFIX: bins[:] was a shallow copy, so the inner bin lists were
        # shared between alternative branches and the append below
        # corrupted sibling branches of the search.
        sbins = [b[:] for b in bins]
        sbins[i].append(item)  # Add item to bin
        solutions.append(bin_pack(items[1:], bin_size, sbins))
    # Open new bin
    solutions.append(bin_pack(items[1:], bin_size, bins + [[item]]))
    return min(solutions, key=len)
def calculateOffset(w, h, min_x, max_x, min_y, max_y):
    """
    Calculate the offset of the svg path to position it centered within
    the regions of the canvas.
    """
    half_span_x = (max_x - min_x) / 2
    half_span_y = (max_y - min_y) / 2
    offset_x = (w / 2) - half_span_x
    offset_y = 0 - (h / 2) - half_span_y
    return (offset_x, offset_y)
def GetNameForCustom(custom_cpu, custom_memory_mib):
    """Create a custom machine type name from the desired CPU and memory specs.

    Args:
        custom_cpu: the number of cpu desired for the custom machine type
        custom_memory_mib: the amount of ram desired in MiB for the custom
            machine type instance

    Returns:
        The custom machine type name for the 'instance create' call
    """
    return f'custom-{custom_cpu}-{custom_memory_mib}'
def _dictionize(sub_dict): """ Create normal dictionaries from a sub_dictionary containing orderedDicts Parameters ---------- sub_dict : dict a dictionary with unlimited handling structure depth and types Returns ------- dict the same structure as `sub_dict` just with dicts instead of orderedDicts """ from collections import OrderedDict normalized_dict = dict() for key in sub_dict: if isinstance(sub_dict[key], OrderedDict): normalized_dict[key] = _dictionize(sub_dict[key]) elif isinstance(sub_dict[key], list): normalized_dict[key] = list() for element in sub_dict[key]: if isinstance(element, (list, dict, set)): normalized_dict[key].append(_dictionize(element)) else: normalized_dict[key] = sub_dict[key] else: normalized_dict[key] = sub_dict[key] return normalized_dict
def to_sub_passages(passages, qlen, max_seq_len):
    """
    split passages that are too long into multiple passages

    :param passages: list of passages; each passage is a list of sentences
        and each sentence is indexed so that sentence[1] holds its tokens
    :param qlen: length of the question, reserved out of each sub-passage
    :param max_seq_len: maximum combined length of question + sub-passage
    :return: list of sub-passages, each short enough for max_seq_len
    """
    # Process shortest passages first (sort key = total token count).
    passages.sort(key=lambda p: sum([len(s[1]) for s in p]))
    sub_passages = []
    for passage in passages:
        splen = 0          # token count of the sub-passage under construction
        sub_passage = []
        for si in range(len(passage)):
            sent = passage[si]
            # if this sentence will make the passage too long, stop adding and make a sub-passage
            if splen + len(sent[1]) + qlen >= max_seq_len:
                sub_passages.append(sub_passage)
                # the next sub-passage will include the prev sentence for overlap/context
                assert si > 0  # no two sentences should make a passage too long - ensured by max_sent_len
                sub_passage = [passage[si-1]]
                splen = len(passage[si-1][1])
            sub_passage.append(sent)
            splen += len(sent[1])
        sub_passages.append(sub_passage)
    return sub_passages
def error_msg(e_type, e_msg):
    """Format a warning/error marker string from an error type and message."""
    return f"<[WARNING {str(e_type)} ERROR {str(e_msg)}]>"
def permutation(points, i, j):
    """
    Return a new circuit with the points at positions i and j swapped.

    @param points circuit
    @param i first index
    @param j second index (< len(points))
    @return new circuit (the input list is left untouched)
    """
    swapped = points.copy()
    swapped[i], swapped[j] = swapped[j], swapped[i]
    return swapped
def decode_message(A):
    """
    Return the first half (rounded up) of the longest palindromic
    subsequence of string A.

    Interval DP over substrings: dp[(i, j)] holds the longest palindromic
    subsequence of A[i..j]. The previous version also accumulated a dead
    `ans` string inside the loop that was overwritten before returning;
    that accumulation has been removed.
    """
    dp = {}
    n = len(A)
    # Base case: every single character is a palindrome.
    for i in range(n):
        dp[(i, i)] = A[i]
    for i in range(n - 1, -1, -1):
        for j in range(i + 1, n):
            if A[i] == A[j]:
                if j == i + 1:
                    dp[(i, j)] = A[i] + A[j]
                else:
                    dp[(i, j)] = A[i] + dp[(i + 1, j - 1)] + A[j]
            else:
                # Keep the longer of the two shrunken intervals.
                if len(dp[(i + 1, j)]) > len(dp[(i, j - 1)]):
                    dp[(i, j)] = dp[(i + 1, j)]
                else:
                    dp[(i, j)] = dp[(i, j - 1)]
    ans = dp[(0, n - 1)]
    return ans[0:(len(ans) + 1) // 2]
def _transpose(group): """ Given a list of 3-tuples from _grouper, return 3 lists. Also filter out possible None values from _grouper """ a, b, c = [], [], [] for g in group: # g can be None if g is not None: x, y, z = g #if x is not None and y is not None and z is not None: a.append(x) b.append(y) c.append(z) return a, b, c
def uniquec(l):
    """Count the instances of the unique integers in l.

    Args:
        l (list[int]): list of integers.

    Returns:
        list[tuple]: list of (n, count(n)) for every n in unique(l).
    """
    # Counting array over 0..max(l).
    counts = [0] * (max(l) + 1)
    for value in l:
        counts[value] += 1
    # Keep only the values that actually occurred.
    return [(value, count) for value, count in enumerate(counts) if count]
def linear(F_CH4, ratio=0.15):
    """Radiative forcing from oxidation of methane to stratospheric H2O.

    Stratospheric water vapour forcing follows a practically linear
    relationship with the CH4 radiative forcing in MAGICC and AR5.
    """
    return ratio * F_CH4
def _format_firewall_stdout(cmd_ret):
    """
    Helper function to format the stdout from the get_firewall_status function.

    cmd_ret
        The return dictionary that comes from a cmd.run_all call.

    Returns {"success": True, "rulesets": {name: flag, ...}} parsed from
    the tabular stdout.
    """
    ret_dict = {"success": True, "rulesets": {}}
    for line in cmd_ret["stdout"].splitlines():
        # Skip the table header row and the separator row.
        if line.startswith("Name"):
            continue
        if line.startswith("---"):
            continue
        ruleset_status = line.split()
        # NOTE(review): bool() of any non-empty string is True, so if the
        # status column holds text like "Enabled"/"Disabled" this is always
        # True — confirm whether comparing against the expected token was
        # intended here.
        ret_dict["rulesets"][ruleset_status[0]] = bool(ruleset_status[1])
    return ret_dict
def smooth_kspace(k, R, mu, beta):
    """Smooth k-space window function."""
    scaled = (mu * k * R / 2.50) ** (beta * 3.12)
    return 1 / (1 + scaled)
def compute_georange(geomean, geosd, count):
    """Compute the geometric range of one geometric standard deviation
    around the geometric mean; 0.0 when count or geosd is not positive."""
    if count <= 0 or geosd <= 0.0:
        return 0.0
    return geomean * geosd - geomean / geosd
def get_avg_sentiment(sentiment):
    """
    Compile the average compound sentiment of all titles and bodies
    collected for each coin in the query.
    """
    average = {}
    for coin, readings in sentiment.items():
        # Sum every compound reading associated with this coin.
        total = sum(item['compound'] for item in readings)
        # Take the mean only when the total is non-zero (matches the
        # original behavior and avoids dividing 0 by an empty list).
        average[coin] = total / len(readings) if total != 0 else total
    return average
def pad_diff(actual_height, actual_width, desired_shape):
    """Padding needed to grow an image to desired_shape on the right/bottom.

    Returns (left, top, right, bottom) with zero padding on the left/top.
    """
    height_pad = desired_shape - actual_height
    width_pad = desired_shape - actual_width
    return (0, 0, width_pad, height_pad)
def check_fields(fields: list, data: dict):
    """Check that every key in *fields* exists in *data*.

    Returns an error message for the first missing field, or None when
    all fields are present."""
    missing = [field for field in fields if field not in data]
    if missing:
        return f"{missing[0].title()} is not set"
    return None
def is_str(l):
    """Return True if *l* is a str or bytes instance, False otherwise.

    Uses isinstance instead of the previous `type(l) in [...]` check, so
    subclasses of str/bytes are correctly recognized as strings.
    """
    return isinstance(l, (str, bytes))
def get_landing_url(sending_profile):
    """Get the url for the landing page from a sending profile dict."""
    return "http://{}".format(sending_profile['landing_page_domain'])
def flatten_multidimensional_list(list_of_lists):
    """
    Flatten an arbitrarily nested list into a single flat list.

    Non-list elements (including tuples) are kept as-is. Replaces the
    previous head/tail recursion, whose repeated slicing made it O(n^2)
    and recursed once per element (risking RecursionError on long
    lists), with a per-element loop that recurses only into sublists.

    :return: a multidimensional list that has been flattened
    :rtype: list
    """
    flat = []
    for element in list_of_lists:
        if isinstance(element, list):
            flat.extend(flatten_multidimensional_list(element))
        else:
            flat.append(element)
    return flat
def get_only_first_stacktrace(lines):
    """Keep only the first stacktrace, because multiple stacktraces would
    make stacktrace parsing wrong.

    Leading empty lines are dropped; a '+----' separator after content
    marks the start of the next stacktrace and stops collection.
    """
    collected = []
    for raw in lines:
        stripped = raw.rstrip()
        # A separator after we've collected something starts trace #2.
        if stripped.startswith('+----') and collected:
            break
        # Skip empty lines only while nothing has been collected yet.
        if collected or stripped:
            collected.append(stripped)
    return collected
def _ParseIssueReferences(issue_ref_list):
    """Parses a list of issue references into a tuple of IDs added/removed.

    For example:  [ "alpha:7", "beta:8", "-gamma:9" ] => ([ "7", "8" ], [ "9" ])

    NOTE: We don't support cross-project issue references. Rather we just
    assume the issue reference is within the same project.
    """
    added = []
    removed = []
    for proj in issue_ref_list:
        parts = proj.split(":")
        # "proj:id" -> take the id part; otherwise assume the token itself is
        # "-id" and strip the first character.
        # NOTE(review): a bare *positive* reference like "7" would yield
        # proj[1:] ("") here — confirm colon-less references always carry "-".
        proj_id = parts[1] if len(parts) >= 2 else proj[1:]
        if proj[0] != "-":
            added.append(proj_id)
        else:
            removed.append(proj_id)
    return added, removed
def key2freq(n: int) -> float:
    """
    Give the frequency for a given piano key (A4 = key 49 = 440 Hz,
    equal temperament).

    Args:
        n: The piano key index

    Returns:
        The frequency in Hz
    """
    semitones_from_a4 = n - 49
    return 440 * 2 ** (semitones_from_a4 / 12)
def _tensor_name_base(full_tensor_name): """Removes the device assignment code from a tensor. e.g. _tensor_name_base("foo:3") => "foo" Args: full_tensor_name: A tensor name that is annotated with a device placement (this is what tensor flow introspection gives). Returns: A name without any device assignment. """ if full_tensor_name.startswith("^"): return full_tensor_name[1:] return full_tensor_name.split(":")[0]
def getFileExtension(filename):
    """-------------------------------------------------------------------
    Function:       [getFileExtension]
    Description:    Returns the extension of a filename (the text after
                    the last '.'), or None when the name contains no '.'
    Input:          [filename] the file name or path to inspect
    Return:         the extension string without the dot, or None
    ------------------------------------------------------------------
    """
    if not '.' in filename:
        return None
    return filename.split('.')[-1]
def is_dict(obj):
    """Return True when *obj* is a dict (or dict subclass) instance."""
    return isinstance(obj, dict)
def isRed(x):
    """
    Helper to determine whether a tree link/node is red.

    Parameters:
        x: a node in the tree (color flag True means red); None is not red.
    """
    return False if x is None else x.color == True