def get_factor(units, unit_id):
    """ Returns the factor of a Unit-Config """
    if not units:
        return None
    for unit in units:
        if unit.id == unit_id:
            if unit.to_base_function:
                return unit.to_base_function
            return unit.factor
def monitor_queue(cfg, log):
    """Function Stub: monitor_queue

    Description:  This is a function stub for rmq_metadata.monitor_queue

    Arguments:
        cfg -> Stub argument holder.
        log -> Stub argument holder.
    """
    status = True
    if cfg and log:
        status = True
    return status
def _entity_to_human(x):
    """Map the entity into a human readable string.

    Accepted types:
        - Built-in types (str, int, float)
        - List
        - Pandas DataFrame
    """
    if 'pandas.core.frame.DataFrame' in str(type(x)):
        return x
    elif 'pandas.core.series.Series' in str(type(x)):
        return x
    elif 'pandas.core.indexes.base.Index' in str(type(x)):
        return [c for c in x]
    else:
        return x
def time_to_float(time):
    """Converts a human-readable time to a float.

    01:03.55 -> (float) 63.55

    Args:
        time (str): The time to be converted

    Returns:
        float: The time in seconds, or None for missing/DQ entries.
    """
    if time is None:
        return None
    try:
        return float(time)
    except ValueError:
        if "dq" in time.lower():
            return None
        minutes, seconds = time.split(":")
        return int(minutes) * 60 + float(seconds)
def load_from_file(fname):
    """Reloads a new stopwords list from a file.

    Note:
        The internal stopword list is overwritten.

    Args:
        fname (:obj:`str`): a file path string

    Returns:
        (:obj:`list`): The list of terms
    """
    try:
        with open(fname, "r") as f:
            return list(f.read().strip().split("\n"))
    except EnvironmentError:
        print('File access error at {}, data loading is skipped.'.format(fname))
        return []
def down_generalized(element, priorities, node, nbr_functions, max_values):
    """
    Computes the largest m = [m_1, ..., m_k] such that up(m, priorities) <= m' = element[1:].
    Then we add node to obtain [node, m]. When computing down, priorities is a tuple of size k
    which gives the encountered priority according to each priority function. max_values
    records the maximum value to know when a memory value is not defined.
    """
    # resulting node
    res = [0] * (nbr_functions + 1)
    res[0] = node
    # for each priority function (numbered from 1 to k)
    for func in range(1, nbr_functions + 1):
        encountered_priority_p = priorities[func - 1]
        # if the priority encountered is even
        if encountered_priority_p % 2 == 0:
            m_prim = element[func]
            if encountered_priority_p < m_prim:
                res[func] = m_prim
            else:
                res[func] = max(encountered_priority_p - 1, 0)
        else:
            m_prim = element[func]
            if encountered_priority_p <= m_prim:
                res[func] = m_prim
            else:
                if encountered_priority_p != max_values[func - 1]:
                    res[func] = encountered_priority_p + 1
                else:
                    return -1
    return res
def get_tuple_version(hexversion):
    """Get a tuple from a compact version in hex."""
    h = hexversion
    return (h & 0xff0000) >> 16, (h & 0xff00) >> 8, h & 0xff
def easy_unpack(elements):
    """Returns a tuple with 3 elements - first, third and second to the last."""
    return (elements[0], elements[2], elements[-2])
def format_scope_name(scope_name, prefix, suffix):
    """Add a prefix and a suffix to a scope name."""
    if prefix != "":
        if not prefix[-1] == "/":
            prefix += "/"
    if suffix != "":
        if not suffix[0] == "/":
            suffix = "/" + suffix
    return prefix + scope_name + suffix
def get_dict_properties(item, fields, mixed_case_fields=None, formatters=None):
    """Return a tuple containing the item properties.

    :param item: a single dict resource
    :param fields: tuple of strings with the desired field names
    :param mixed_case_fields: tuple of field names to preserve case
    :param formatters: dictionary mapping field names to callables
        to format the values
    """
    if mixed_case_fields is None:
        mixed_case_fields = []
    if formatters is None:
        formatters = {}

    row = []
    for field in fields:
        if field in mixed_case_fields:
            field_name = field.replace(' ', '_')
        else:
            field_name = field.lower().replace(' ', '_')
        data = item[field_name] if field_name in item else ''
        if data == '<nil>':
            data = ''
        if field in formatters:
            data = formatters[field](data)
        row.append(data)
    return tuple(row)
def closest_values(L):
    """Closest values

    :param L: list of values
    :returns: two values from L with minimal distance
    :modifies: the order of L
    :complexity: O(n log n), for n=len(L)
    """
    assert len(L) >= 2
    L.sort()
    valmin, argmin = min((L[i] - L[i - 1], i) for i in range(1, len(L)))
    return L[argmin - 1], L[argmin]
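# A minimal usage sketch (not from the source) for closest_values; the values below
# are illustrative: after sorting [5, 1, 4, 9], the smallest gap is between 4 and 5.
assert closest_values([5, 1, 4, 9]) == (4, 5)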
def intround(f: float) -> int:
    """Rounds a float and converts it to int."""
    return int(round(f))
def sum_of_series(first_term, common_diff, num_of_terms):
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def _allowed_uri_format(rv: str) -> bool:
    """Check that a URI format doesn't have another resolver in it."""
    return (
        not rv.startswith("https://identifiers.org")
        and not rv.startswith("http://identifiers.org")
        and "n2t.net" not in rv
        and "purl.bioontology.org" not in rv
    )
def get_iou(bb1, bb2):
    """
    Calculate the Intersection over Union (IoU) of two bounding boxes.

    Taken from:
    https://stackoverflow.com/questions/25349178/calculating-percentage-of-bounding-box-overlap-for-image-detector-evaluation

    Parameters
    ----------
    bb1 : dict
        Keys: {'x1', 'x2', 'y1', 'y2'}
        The (x1, y1) position is at the top left corner,
        the (x2, y2) position is at the bottom right corner
    bb2 : dict
        Keys: {'x1', 'x2', 'y1', 'y2'}
        The (x1, y1) position is at the top left corner,
        the (x2, y2) position is at the bottom right corner

    Returns
    -------
    float in [0, 1]
    """
    assert bb1['x1'] <= bb1['x2']
    assert bb1['y1'] <= bb1['y2']
    assert bb2['x1'] <= bb2['x2']
    assert bb2['y1'] <= bb2['y2']

    # determine the coordinates of the intersection rectangle
    x_left = max(bb1['x1'], bb2['x1'])
    y_top = max(bb1['y1'], bb2['y1'])
    x_right = min(bb1['x2'], bb2['x2'])
    y_bottom = min(bb1['y2'], bb2['y2'])

    if x_right < x_left or y_bottom < y_top:
        return 0.0

    # The intersection of two axis-aligned bounding boxes is always an
    # axis-aligned bounding box
    intersection_area = (x_right - x_left) * (y_bottom - y_top)

    # compute the area of both AABBs
    bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
    bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])

    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
    if bb1_area == bb2_area == 0:
        return 1.0
    iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
    assert iou >= 0.0
    assert iou <= 1.0
    return iou
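# A minimal usage sketch (not from the source) for get_iou with illustrative boxes:
# they overlap in a 1x1 square and their union covers 4 + 4 - 1 = 7 units, so IoU = 1/7.
box_a = {'x1': 0, 'y1': 0, 'x2': 2, 'y2': 2}
box_b = {'x1': 1, 'y1': 1, 'x2': 3, 'y2': 3}
assert abs(get_iou(box_a, box_b) - 1.0 / 7.0) < 1e-9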
def sum_powers_in_range(m, n, p):
    """
    What comes in:  Non-negative integers m and n, with n >= m,
        and a number p.
    What goes out:  The sum
           m**p + (m+1)**p + (m+2)**p + ... + n**p
        for the given numbers m, n and p.  The latter may be any number
        (possibly a floating point number, and possibly negative).
    Side effects:   None.
    Example:
        -- sum_powers_in_range(3, 100, 0.1) returns about 142.384776
    """
    # ------------------------------------------------------------------
    # Done: 5. Implement and test this function.
    #   Note that you should write its TEST function first (above).
    #
    # No fair running the code of sum_powers_in_range to GENERATE
    #   test cases; that would defeat the purpose of TESTING!
    # ------------------------------------------------------------------
    total = 0
    for k in range((n + 1) - m):
        total = total + (m + k) ** p
    return total
def _sort_key_max_confidence_sd(sample, labels):
    """Samples sort key by the maximum confidence_sd."""
    max_confidence_sd = float("-inf")
    for inference in sample["inferences"]:
        if labels and inference["label"] not in labels:
            continue
        confidence_sd = inference.get("confidence_sd", float("-inf"))
        if confidence_sd > max_confidence_sd:
            max_confidence_sd = confidence_sd
    return max_confidence_sd
def truncatechars(s, n, ellipsis='...'):
    """Truncates the string `s` to at most `n` characters."""
    if len(s) > n:
        return s[:n - len(ellipsis)].rstrip(' .,;:?!') + ellipsis
    else:
        return s
def poly2xywha(cx, cy, width, height, theta):
    """Check the angle in the OpenCV format for problems, and record and correct them."""
    if theta == 0:
        theta = -90
        width, height = height, width
    if width != max(width, height):
        # width is not the longest edge
        theta = theta - 90
        return cx, cy, height, width, theta
    else:
        # width is the longest edge
        return cx, cy, width, height, theta
def create_pipeline(entry, _exit, stages):
    """Create a pipeline given an entry point.

    Args:
        entry: a string naming the stage to start at.
        _exit: a string naming the stage to stop at.
        stages: a mapping of stages in order of the general pipeline.

    Returns:
        A list of stage names to run.
    """
    stage_names = list(stages.keys())
    start = stage_names.index(entry)
    end = stage_names.index(_exit)
    return stage_names[start:(end + 1)]
def capwords(phrase):
    """Capitalize each word in a string.

    A word is defined as anything with a space separating it from the next word.
    """
    lowercase_only = ('via',)
    uppercase_only = ('CX',)

    capitalized = [word.capitalize() for word in phrase.split(' ')]
    capitalized = [word.lower() if word.lower() in lowercase_only else word
                   for word in capitalized]
    capitalized = [word.upper() if word.upper() in uppercase_only else word
                   for word in capitalized]
    return ' '.join(capitalized)
def absolute_difference_distance(x: float, y: float) -> float:
    """Calculate distance for `get_anomalies_density` function by taking the
    absolute value of the difference.

    Parameters
    ----------
    x:
        first value
    y:
        second value

    Returns
    -------
    result: float
        absolute difference between values
    """
    return abs(x - y)
def improve_data(data_list: list, non_integers: list) -> list:
    """Takes a list of dictionaries containing the data, makes sure all
    dictionaries have the correct data, and zeroes all missing entries.

    Args:
        data_list (list): list of dictionaries containing the data
        non_integers (list): list of headers which should not be converted to a number

    Returns:
        list: improved list of data
    """
    headers = list(data_list[0].keys())
    for data in data_list:
        for header in headers:
            if data[header] == '':
                data[header] = '0'
    for data in data_list:
        for header in headers:
            if header not in non_integers:
                data[header] = str(data[header])
                data[header] = data[header].replace('*', '')
                data[header] = float(data[header])
    return data_list
def fit_frames(totalsize, framesize, stepsize=None):
    """
    Calculates how many frames of 'framesize' fit in 'totalsize',
    given a step size of 'stepsize'.

    Parameters
    ----------
    totalsize: int
        Size of total
    framesize: int
        Size of frame
    stepsize: int
        Step size, defaults to framesize (i.e. no overlap)

    Returns a tuple (nframes, newsize, remainder)
    """
    if ((totalsize % 1) != 0) or (totalsize < 1):
        raise ValueError("invalid totalsize (%d)" % totalsize)
    if ((framesize % 1) != 0) or (framesize < 1):
        raise ValueError("invalid framesize (%d)" % framesize)
    if framesize > totalsize:
        return 0, 0, totalsize

    if stepsize is None:
        stepsize = framesize
    else:
        if ((stepsize % 1) != 0) or (stepsize < 1):
            raise ValueError("invalid stepsize")

    totalsize = int(totalsize)
    framesize = int(framesize)
    stepsize = int(stepsize)

    nframes = ((totalsize - framesize) // stepsize) + 1
    newsize = nframes * stepsize + (framesize - stepsize)
    remainder = totalsize - newsize
    return nframes, newsize, remainder
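# A minimal usage sketch (not from the source) for fit_frames: 10 samples cut into
# frames of 4 with a hop of 2 (illustrative sizes) yield 4 frames and no remainder.
assert fit_frames(10, 4, 2) == (4, 10, 0)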
def _clear_context(request):
    """Clears the context of all keys."""
    context = {}
    return context
def manhattan_distance(pos1, pos2):
    """Returns the manhattan distance between two points in (x, y) format."""
    return abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])
def probe_id(subscription_id, resource_group_name, appgateway_name, name):
    """Generate the id for a probe."""
    return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/probes/{3}'.format(
        subscription_id, resource_group_name, appgateway_name, name
    )
def _is_tcp_synack(tcp_flags):
    """
    Passed a TCP flags object (hex) and return 1 if it contains
    TCP SYN + ACK flags and no other flags.
    """
    if tcp_flags == 0x12:
        return 1
    else:
        return 0
def __correct_superbowl(s):
    """Superbowl is one word, dammit."""
    if not s:
        return s
    return s.replace("Super Bowl", "Superbowl").replace("SuperBowl", "Superbowl").rstrip(": ")
def replace(line_val, seq):
    """
    Replace special characters.

    :param line_val: line as string
    :param seq: sequence number
    :return: changed value
    """
    # replace TAB with ','
    rtn = line_val.replace('\t', ',')
    # remove line breaks
    rtn = rtn.replace('\r\n', '')
    rtn = rtn.replace('\n', '')
    rtn = rtn.replace('NULL', '0')
    # check whether there is a 'ZZ' field in the row
    _tmp_lst = rtn.split(',')
    for _tmp in _tmp_lst:
        _tmp = _tmp.strip().upper()
        if 'ZZ' == _tmp:
            rtn = None
            break
    if rtn is not None:
        _tmp_lst.append(str(seq))
        rtn = ','.join(_tmp_lst)
        rtn = rtn + '\n'
    return rtn
def validate_ref_dataset_required(ref_dataset: int) -> int:
    """
    Validates the reference to a dataset of an object.

    :param ref_dataset: The reference to a dataset of the object.
    :return: The validated reference to a dataset.
    """
    if ref_dataset is None:
        raise ValueError("Reference to a dataset must not be empty.")
    if ref_dataset <= 0:
        raise ValueError("Reference to a dataset must be positive.")
    return ref_dataset
def largest_negative_number(seq_seq):
    """
    Returns the largest NEGATIVE number in the given sequence of
    sequences of numbers.  Returns None if there are no negative
    numbers in the sequence of sequences.

    For example, if the given argument is:
        [(30, -5, 8, -20),
         (100, -2.6, 88, -40, -5),
         (400, 500)
        ]
    then this function returns -2.6.

    As another example, if the given argument is:
        [(200, 2, 20), (500, 400)]
    then this function returns None.

    Preconditions:
      :type seq_seq: (list, tuple)
      and the given argument is a sequence of sequences,
      where each subsequence contains only numbers.
    """
    # -------------------------------------------------------------------------
    # DONE: 5. Implement and test this function.
    #   Note that you should write its TEST function first (above).
    #
    # CHALLENGE: Try to solve this problem with no additional sequences
    #   being constructed (so the SPACE allowed is limited to the
    #   given sequence of sequences plus any non-list variables you want).
    # -------------------------------------------------------------------------
    largest = 0
    for k in range(len(seq_seq)):
        for j in range(len(seq_seq[k])):
            if seq_seq[k][j] < 0 and largest == 0:
                largest = seq_seq[k][j]
            if seq_seq[k][j] < 0 and seq_seq[k][j] > largest:
                largest = seq_seq[k][j]
    if largest != 0:
        return largest
    return None
def flatten_list(_2d_list):
    """
    Flatten a list of lists.

    Code from: https://stackabuse.com/python-how-to-flatten-list-of-lists/
    """
    flat_list = []
    # Iterate through the outer list
    for element in _2d_list:
        if type(element) is list:
            # If the element is of type list, iterate through the sublist
            for item in element:
                flat_list.append(item)
        else:
            flat_list.append(element)
    return flat_list
def create_validation_report(package_name):
    """
    Creates a new ODP resource containing information about the dataset per se.

    :return: the validation report resource dictionary
    """
    valrep_dict = {
        "description": "Validation report",
        "format": "application/pdf",
        "package_id": package_name,
        "url": "http://data.europa.eu/euodp/en/data/dataset/{}/Validation_report.pdf".format(package_name),
        "resource_type": "http://data.europa.eu/euodp/kos/documentation-type/RelatedDocumentation"
    }
    return valrep_dict
def build_arguments(path="/tmp"):
    """Builds the arguments for the CaptureInfo.

    Args:
        path (str): path to the file

    Returns:
        dict: arguments to build the CaptureInfo
    """
    return dict(path=path)
def remove_none_items(adict):
    """Return a similar dict without keys associated to None values."""
    return {k: v for k, v in adict.items() if v is not None}
def transpose(A, mul_factor=1):
    """The transpose of the matrix.

    Args
    ----
    A (compulsory)
        A matrix.
    mul_factor (int/float, optional)
        Multiplication factor of the matrix. Defaults to 1.

    Returns
    -------
    Matrix
        The transpose of the given matrix multiplied by the multiplication factor.
    """
    matrix = []
    for i in range(len(A[0])):
        matrix.append([])
        for j in range(len(A)):
            matrix[i].append(A[j][i] * mul_factor)
    return matrix
def slicestr(value, sliceamount):
    """
    Returns the fraction of the string specified in ``sliceamount``.

    {{some_string|slicestr:"5:9"}} will return `onar` if ``some_string``
    was "dictionary".
    """
    start, end = sliceamount.split(':')
    return value[int(start):int(end)]
def partition_url(path_info):
    """Returns sections of a request url path."""
    return [part for part in path_info.split('/') if part != '']
def add_padding(bbox, padding_pct):
    """Add the given percentage padding to the given bounding box."""
    min_lat = bbox[1]
    max_lat = bbox[3]
    min_lon = bbox[0]
    max_lon = bbox[2]
    lat_pad = ((max_lat - min_lat) / 100) * padding_pct
    lon_pad = ((max_lon - min_lon) / 100) * padding_pct
    bbox = (min_lon - lon_pad, min_lat - lat_pad, max_lon + lon_pad, max_lat + lat_pad)
    return bbox
def is_file_image(filename):
    """Return whether a file's extension is an image's.

    Args:
        filename(str): file path.

    Returns:
        (bool): whether the file is an image or not.
    """
    img_ex = ['jpg', 'png', 'bmp', 'jpeg', 'tiff']
    if '.' not in filename:
        return False
    s = filename.split('.')
    if s[-1].lower() not in img_ex:
        return False
    if filename.startswith('.'):
        return False
    return True
def percentage(reviewed, voted):
    """Returns the percentage of voted contributions."""
    try:
        return 100.0 * voted / reviewed
    except ZeroDivisionError:
        return 100.0
def string_SizeInBytes(size_in_bytes):
    """Make ``size in bytes`` human readable. Doesn't support sizes greater than 1000 PB.

    Usage::

        >>> from __future__ import print_function
        >>> from weatherlab.lib.filesystem.windowsexplorer import string_SizeInBytes
        >>> print(string_SizeInBytes(100))
        100 B
        >>> print(string_SizeInBytes(100*1000))
        97.66 KB
        >>> print(string_SizeInBytes(100*1000**2))
        95.37 MB
        >>> print(string_SizeInBytes(100*1000**3))
        93.13 GB
        >>> print(string_SizeInBytes(100*1000**4))
        90.95 TB
        >>> print(string_SizeInBytes(100*1000**5))
        88.82 PB
    """
    res, by = divmod(size_in_bytes, 1024)
    res, kb = divmod(res, 1024)
    res, mb = divmod(res, 1024)
    res, gb = divmod(res, 1024)
    pb, tb = divmod(res, 1024)
    if pb != 0:
        human_readable_size = "%.2f PB" % (pb + tb / float(1024))
    elif tb != 0:
        human_readable_size = "%.2f TB" % (tb + gb / float(1024))
    elif gb != 0:
        human_readable_size = "%.2f GB" % (gb + mb / float(1024))
    elif mb != 0:
        human_readable_size = "%.2f MB" % (mb + kb / float(1024))
    elif kb != 0:
        human_readable_size = "%.2f KB" % (kb + by / float(1024))
    else:
        human_readable_size = "%s B" % by
    return human_readable_size
def convert_ra2deg(hh, mm, ss):
    """
    Convert a measurement of right ascension in hh:mm:ss to decimal degrees.

    Parameters
    ----------
    hh : int -or- string -or- float
        hours component of right ascension
    mm : int -or- string -or- float
        minutes component of right ascension
    ss : int -or- string -or- float
        seconds component of right ascension

    Returns
    -------
    result : float
        Right ascension in degrees
    """
    if isinstance(hh, str):
        hh = float(hh)
    if isinstance(mm, str):
        mm = float(mm)
    if isinstance(ss, str):
        ss = float(ss)
    rahh = hh + mm / 60. + ss / 3600.
    return rahh * 360. / 24.
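# A minimal usage sketch (not from the source) for convert_ra2deg: 6h 30m 0s is
# 6.5 hours of right ascension, and each hour spans 15 degrees.
assert abs(convert_ra2deg(6, 30, 0) - 97.5) < 1e-9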
def remove_hidden(names):
    """Remove (in-place) all strings starting with a '.' in the given list."""
    i = 0
    while i < len(names):
        if names[i].startswith('.'):
            names.pop(i)
        else:
            i += 1
    return names
def merge_contour_lists(*args):
    """Merges two or more dictionaries as given by contours_around_labels into a single dictionary."""
    from itertools import islice
    if len(args) == 0:
        return {}
    d = args[0].copy()
    for a in islice(args, 1, None):
        for k, v in a.items():
            d.setdefault(k, []).extend(v)
    return d
def user_type_udf(user):
    """Converts user type to 'Subscriber' or 'Customer'."""
    if not user:
        return None
    if user.lower().startswith("sub"):
        return "Subscriber"
    elif user.lower().startswith("cust"):
        return "Customer"
def build_empty_bbox_mask(bboxes):
    """
    Generate a mask: 0 means empty bbox, 1 means non-empty bbox.

    :param bboxes: list[list] bboxes list
    :return: flag matrix.
    """
    flag = [1 for _ in range(len(bboxes))]
    for i, bbox in enumerate(bboxes):
        # empty bbox coord in label files
        if bbox == [0, 0, 0, 0]:
            flag[i] = 0
    return flag
def get_output_string(output):
    """Return a Jupyter cell's output as a single string ('' if the cell has no text output)."""
    lines = []
    lines.extend(output.get('text', []))
    lines.extend(output.get('data', {}).get('text/plain', []))
    return ''.join(lines)
def group_dict_by_value(d: dict) -> dict:
    """
    Group a dictionary by values.

    Parameters
    ----------
    d : dict
        Input dictionary

    Returns
    -------
    dict
        Output dictionary. The keys are the values of the initial dictionary
        and the values are given by a list of keys corresponding to the value.

    >>> group_dict_by_value({2: 3, 1: 2, 3: 1})
    {3: [2], 2: [1], 1: [3]}
    >>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10: 1, 12: 3})
    {3: [2, 12], 2: [1], 1: [3, 10]}
    """
    d_out = {}
    for k, v in d.items():
        if v in d_out:
            d_out[v].append(k)
        else:
            d_out[v] = [k]
    return d_out
def getResponse(answ, *line):
    """Search for a specific answer in the given input.

    Takes the answer string and, optionally, another line to search in.
    If no line is supplied, the user is prompted for one.
    Returns a boolean value.
    """
    if not len(line) > 0:
        # input line to search
        line = input("What do you do? : ")
    # search line
    if answ.lower() in line:
        return True
    else:
        return False
def bigger_price(limit, data):
    """TOP most expensive goods."""
    new_list = []
    while limit > 0:
        max_price = 0
        max_product = {}
        for value in data:
            if value['price'] > max_price:
                max_price = value['price']
                max_product = value
        new_list.append(max_product)
        data.remove(max_product)
        limit -= 1
    return new_list
def transpose(G):
    """Transpose graph."""
    V, E = G
    ET = {}
    for u in V:
        ET[u] = []
    for u, vs in E.items():
        for v in vs:
            ET[v].append(u)
    return (V, ET)
def view_data_url(expression_accession):
    """Creates the url to view the data on the EBI's Single Cell Expression Atlas browser."""
    url = "https://www.ebi.ac.uk/gxa/sc/experiments/%s/Results" % expression_accession
    return url
def XOR(v1, v2):
    """
    XOR operation element by element from 2 lists.

    :param v1: [1, 0, 1, 0, 0, 1]
    :param v2: [1, 1, 0, 0, 1, 1]
    :return:   [0, 1, 1, 0, 1, 0]
    """
    return [a ^ b for a, b in zip(v1, v2)]
def guessFolderName(repoUrl):
    """This function returns the expected folder name after cloning the repository."""
    return repoUrl.split('/')[-1]
def generate_gcp_project_link(project_id: str) -> str:
    """Generates a Slack markdown GCP link from the given `project_id`.

    Args:
        project_id: The project ID of which to hyperlink.

    Returns:
        The generated hyperlink.
    """
    return ('<https://console.cloud.google.com/home/'
            + f'dashboard?project={project_id}|{project_id}>')
def check_overlap(x, y):
    """Check whether two spans overlap or not.

    Args:
        x (Tuple[int, int]): start, end (inclusive) position of span x
        y (Tuple[int, int]): start, end (inclusive) position of span y

        x: (3, 4), y: (4, 5) -> True
        x: (3, 3), y: (4, 5) -> False

    Returns:
        bool: whether the two spans overlap or not
    """
    # x start > y end or y start > x end: no overlap
    if x[0] > y[1] or y[0] > x[1]:
        return False
    else:
        return True
def end_file(current_output):
    """End an output file.

    This is smart enough to do nothing if current_output is None.

    @param current_output: the current file output (or None).
    @returns: None, to represent the closed stream.
    """
    if current_output:
        # write the iati-activities end tag
        current_output.write("</iati-activities>\n")
        # close the output
        current_output.close()
    return None
def obtener_nombre_pieza(simbolo):
    """
    (str) -> str

    >>> obtener_nombre_pieza('p')
    'Peon blanco'
    >>> obtener_nombre_pieza('R')
    'Rey Negro'

    Returns the name of the chess piece given its symbol.

    :param simbolo: the representation of the piece as defined in the assignment
    :return: the name and colour of the piece
    """
    tipo = 'Negro'
    if simbolo.islower():
        tipo = 'blanco'
    retorno = simbolo.lower()
    if retorno == 'p':
        return 'Peon ' + tipo
    elif retorno == 't':
        return 'Torre ' + tipo
    elif retorno == 'k':
        return 'Caballo ' + tipo
    elif retorno == 'a':
        return 'Alfil ' + tipo
    elif retorno == 'q':
        return 'Reina ' + tipo
    elif retorno == 'r':
        return 'Rey ' + tipo
    else:
        return 'No es una pieza'
def _check_arg(arg):
    """Checks if the argument is a True bool or a string meaning True."""
    true_strings = ('True', 'true', 'T', 't', '.true.', '.True.')
    if arg is None:
        return 'n'
    if isinstance(arg, bool):
        if arg:
            return 'y'
        else:
            return 'n'
    if isinstance(arg, str):
        if arg in true_strings:
            return 'y'
        else:
            return 'n'
    return 'add'
def _solve_integral_equations_LUT(discrimination, ratio, _, __, interpolate_function):
    """Solve single sigmoid integral for difficulty parameter using a Look Up Table."""
    return interpolate_function(discrimination, ratio)
def rescue_default(callback, default=""):
    """Call callback. If there's an exception, return default.

    It's convenient to use lambda to wrap the expression in order to create
    a callback. Only IndexError, AttributeError, and ValueError are caught.
    """
    try:
        return callback()
    except (IndexError, AttributeError, ValueError):
        return default
def multiply(x, y):
    """Karatsuba algorithm for multiplication."""
    if len(str(x)) == 1 or len(str(y)) == 1:
        # last step in recursion, now go back
        return x * y
    else:
        n = max(len(str(x)), len(str(y)))
        nby2 = n // 2

        a = x // 10**nby2
        b = x % 10**nby2
        c = y // 10**nby2
        d = y % 10**nby2

        ac = multiply(a, c)
        bd = multiply(b, d)
        ad_plus_bc = multiply(a + b, c + d) - ac - bd

        # this little trick, writing n as 2*nby2, takes care of both even and odd n
        prod = ac * 10**(2 * nby2) + (ad_plus_bc * 10**nby2) + bd
        return prod
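# A minimal usage sketch (not from the source) for the Karatsuba multiply; the
# operands are illustrative and the result is checked against the built-in product.
assert multiply(1234, 5678) == 1234 * 5678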
def bfs(x, n, res):
    """
    Stepping numbers are generated from existing stepping numbers,
    for example: 1 generates 12 and 10, while 2 generates 21 and 23.
    """
    q = []
    q.append(x)
    while q:
        p = q.pop(0)
        if p <= n:
            res.append(p)
            d = p % 10
            if d == 0:
                q.append(p * 10 + (d + 1))
            elif d == 9:
                q.append(p * 10 + (d - 1))
            else:
                q.append(p * 10 + (d + 1))
                q.append(p * 10 + (d - 1))
    return res
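# A minimal usage sketch (not from the source) for bfs: seeded with 1 and capped at
# an illustrative bound of 21, the stepping numbers reached are 1, 12 and 10.
assert bfs(1, 21, []) == [1, 12, 10]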
def is_expected_path(text: str) -> bool:
    """
    Check that no dots appear outside of curly braces, if any.
    This assumes there are no nested curly braces.
    """
    in_braces = False
    for c in text:
        if c == "{":
            in_braces = True
        elif c == "}":
            in_braces = False
        elif c == "." and not in_braces:
            return False
    return True
def normalize(x, possible_values):
    """Takes a parameter and returns a corresponding value in [0, 1]."""
    assert x in possible_values
    min_x = min(possible_values)
    max_x = max(possible_values)
    z = (x - min_x) / (max_x - min_x)
    assert z >= 0
    assert z <= 1
    return z
def aes_unpad(s, block_size=32, padding='{'):
    """Removes padding to get the value from @s for AES decryption.

    @s: #str being AES encrypted or decrypted
    @block_size: the AES block size
    @padding: character to pad with

    -> unpadded #str

    ..
        from vital.security import aes_pad
        aes_unpad("swing{{{{{{{{{{{{{{{{{{{{{{{{{{{")
        # -> 'swing'
    ..
    """
    return s.rstrip(padding)
def cast_to_type(obj, out_type):
    """Cast obj to out_type if it's not out_type already.

    If the obj happens to be out_type already, it just returns obj as is.

    Args:
        obj: input object
        out_type: type.

    Returns:
        obj cast to out_type. Usual python conversion / casting rules apply.
    """
    in_type = type(obj)
    if out_type is in_type:
        # no need to cast.
        return obj
    else:
        return out_type(obj)
def is_false(str_value: str) -> bool:
    """Returns True if the string represents a False value, else returns False.

    :param str_value: String to evaluate.
    """
    return str_value.lower() in ("false", "no", "0", "null", "none", "::ixnet::obj-null")
def dict_keyword_access(d, key):
    """:yaql:operator .

    Returns the dict's key value.

    :signature: left.right
    :arg left: input dictionary
    :argType left: mapping
    :arg right: key
    :argType right: keyword
    :returnType: any (appropriate value type)

    .. code::

        yaql> {a => 2, b => 2}.a
        2
    """
    return d.get(key)
def filter_directories(path_list):
    """Given a list of paths, return only those that are directories."""
    return [p for p in path_list if p.is_dir()]
def _prepare_sig_units(sig, units=False):
    """Remove brackets from the shortcut, if any."""
    while '[' in sig:
        sig = sig[:sig.index('[')] + sig[sig.index(']') + 1:]
    return sig
def format_for_type(format, type):
    """Returns the format appropriate to the given type.

    Parameters
    ----------
    format: str or dict
        If it is a str, it is returned as-is. A dict should have the form
        { type : format_for_the_type } + { None : default_format }
    """
    if isinstance(format, dict):
        if type in format:
            return format[type]
        return format[None]
    return format
def add_str(x, y):
    """Handle string addition.

    :Example:
        add_str('11', '01') => '12'
    """
    return str(int(x) + int(y)).zfill(len(x))
def chunk_list(list_: list, size: int) -> list:
    """Take a list `list_` and break it down into a list of lists containing
    `size` elements per list.

    :param list_: A list of elements to be chunked.
    :type list_: list
    :param size: The number of elements per chunk.
    :type size: int
    :return: A list of chunks containing `size` elements per chunk.
    """
    return [
        list_[index:index + size]
        for index in range(0, len(list_), size)
    ]
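# A minimal usage sketch (not from the source) for chunk_list with illustrative values;
# the final chunk simply holds whatever is left over.
assert chunk_list([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]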
def word_count(phrase):
    """Count occurrences of each word in a phrase, excluding punctuation."""
    punctuations = '''!()-[]{};:"\\<>./?@#$%^&*~'''
    counts = dict()
    no_punct = ""
    for char in phrase:
        if char not in punctuations:
            no_punct = no_punct + char
    no_punct = no_punct.replace('_', ' ')
    no_punct = no_punct.replace(',', ' ')
    for word in no_punct.lower().split():
        if word in counts:
            counts[word] += 1
        else:
            counts[word] = 1
    return counts
def get_tags_from_file(lines):
    """Extract the tags from a file and return them as a list."""
    tags_header = "##$$##"
    for i, line in enumerate(lines):
        if tags_header in line:
            break
    else:
        # didn't break out of the loop, so there is no tags header
        return []
    tags_line = lines[i + 1]
    just_tags = (''.join(ch for ch in tags_line if ch.isalnum() or ch == ',')).split(',')
    return [word.strip() for word in just_tags]
def metric_failure_default(metric, failure_value="AUTO"):
    """Returns the default failure value for the specified metric."""
    if failure_value != "AUTO":
        return failure_value
    switcher = {
        "chainsActive": "-1",
        "type": "",
        "typeFull": "",
    }
    return switcher.get(metric, "0")
def isiterable(var):
    """Check if input is iterable."""
    return hasattr(var, "__iter__")
def inc_byte(byte, pct):
    """Increments byte by a percentage."""
    if byte == 0:
        return (pct * 255) / 100
    byte += byte * pct / 100
    return min(byte, 255)
def get_goid(go_cnt, max_val):
    """Get a frequently used GO ID."""
    if max_val is not None:
        for goid, cnt in go_cnt:
            if cnt < max_val:
                return goid, cnt
        return go_cnt[-1][0], go_cnt[-1][1]
    return go_cnt[0][0], go_cnt[0][1]
def convertToMap(listOfDict):
    """Converts a list of dictionary entries to a std::map initialization list."""
    listOfDict = listOfDict.replace('[', '{')
    listOfDict = listOfDict.replace(']', '}')
    listOfDict = listOfDict.replace(':', ',')
    return listOfDict
def crc32(string: str) -> str:
    """Return the standard CRC32 checksum as a hexadecimal string."""
    import binascii
    return "%08X" % binascii.crc32(string.encode())
def clean_url(url):
    """Strips the ending / from a URL if it exists.

    Parameters
    ----------
    url : string
        HTTP URL

    Returns
    -------
    url : string
        URL that is stripped of an ending / if it existed
    """
    if url[-1] == '/':
        return url[:-1]
    return url
def generate_doc_link(path):
    """
    Generate a cost management link to a given path.

    Args:
        (String) path - path to the documentation.
    """
    return f"https://access.redhat.com/documentation/en-us/cost_management_service/2021/{path}"
def get_higher_value(grille):
    """Return the maximum value of the grid."""
    max_val = 0
    for i in range(len(grille)):
        for j in range(len(grille[0])):
            if grille[i][j] is None:
                continue
            if grille[i][j] > max_val:
                max_val = grille[i][j]
    return max_val
def lane_change(observation):
    """
    :param observation:
    :return: a new observation putting all possible lane changes into position
    """
    # Find all vehicles who are ready to change lane:
    veh_lower_lane = [veh for veh in observation if veh[1] == 1]
    # Index for vehicles who need to pull over:
    po_index = []
    for i in range(len(observation)):
        if (observation[i][1] == 0) and (observation[i][2] == 0):
            po_index.append(i)
    for elem in po_index:
        can_pull_over = True
        for veh_1 in veh_lower_lane:
            if observation[elem][0] >= veh_1[0]:
                leading_veh = observation[elem]
                following_veh = veh_1
            else:
                leading_veh = veh_1
                following_veh = observation[elem]
            if leading_veh[0] - leading_veh[3] < following_veh[0]:
                can_pull_over = False
        if can_pull_over:
            observation[elem][1] = 1
    return observation
def predict(pred_header, pred_values, my_tree):
    """Given a decision tree, predict a given dataset's outcome."""
    # find the best feature to split on
    split_feature = list(my_tree.keys())[0]
    feature_index = pred_header.index(split_feature)
    feature_val = pred_values[feature_index].replace(' ', '')

    # deal with possible placeholder Ties.
    try:
        cur_res = my_tree[split_feature][feature_val]
        if cur_res == 'No' or cur_res == 'Yes' or cur_res == 'Tie':
            return cur_res
    except KeyError:
        # no matching attribute in the tree
        return 'Tie'  # kinda cheesy, but works :P
    except Exception as e:
        print('exception: {}'.format(e))  # should not happen

    # delete the feature from pred_header,
    # only keep the subtree with that value and call recursively
    new_tree = my_tree[split_feature][feature_val]
    pred_header.remove(split_feature)
    pred_values.pop(feature_index)
    final_res = predict(pred_header, pred_values, new_tree)
    return final_res
def getTimeString(seconds):
    """
    Get a pretty time string, using hours, minutes, seconds and milliseconds as required.

    :param seconds: The desired time span, given in seconds. Can be an int or a float.
    :return: A string representing the desired time span, given in hours, minutes,
        seconds and milliseconds.
    """
    units = list()
    msecs = (seconds % 1) * 1000
    if msecs >= 1:
        units.append('{0}ms'.format(int(msecs)))
    units.append('{0}s'.format(int(seconds % 60)))
    minutes = seconds / 60
    if minutes >= 1:
        units.append('{0}m'.format(int(minutes % 60)))
    hours = minutes / 60
    if hours >= 1:
        units.append('{0}h'.format(int(hours)))
    return ' '.join(units[::-1])
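# A minimal usage sketch (not from the source) for getTimeString, assuming the
# millisecond handling above: 3725.5 seconds is 1 hour, 2 minutes, 5 seconds, 500 ms.
assert getTimeString(3725.5) == '1h 2m 5s 500ms'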
def _string_lower(string):
    """
    Convenience function to lowercase a string.

    :param string: The string which will be lower-cased.
    :returns: Lower-cased copy of the string.
    """
    return string.lower()
def flatten(dictionary, level=[]):
    """Flattens a dictionary by placing '.' between levels.

    This function flattens a hierarchical dictionary by placing '.'
    between keys at various levels to create a single key for each
    value. It is used internally for converting the configuration
    dictionary to more convenient formats. Implementation was
    inspired by `this StackOverflow post
    <https://stackoverflow.com/questions/6037503/python-unflatten-dict>`_.

    Parameters
    ----------
    dictionary : dict
        The hierarchical dictionary to be flattened.
    level : list, optional
        The key path to prepend to the keys of this dictionary,
        enabling recursive calls. By default, an empty list.

    Returns
    -------
    dict
        The flattened dictionary.

    See Also
    --------
    lfads_tf2.utils.unflatten : Performs the opposite of this operation.
    """
    tmp_dict = {}
    for key, val in dictionary.items():
        if type(val) == dict:
            tmp_dict.update(flatten(val, level + [key]))
        else:
            tmp_dict['.'.join(level + [key])] = val
    return tmp_dict
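# A minimal usage sketch (not from the source) for flatten; the nested config below is
# illustrative, and nested keys are joined with '.' into single flat keys.
assert flatten({'model': {'dim': 8, 'opt': {'lr': 0.01}}, 'seed': 0}) == {
    'model.dim': 8, 'model.opt.lr': 0.01, 'seed': 0}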
def fuse_missing_perc(name: str, perc: float) -> str:
    """Append (x.y%) to the name if `perc` is not 0."""
    if perc == 0:
        return name
    return f"{name} ({perc:.1%})"
def qs_order_by(qs, arg=None):
    """
    Usage:
        {{ qs|qs_order_by:"id" }}
    """
    return qs.order_by('id') if qs else []
def sentence_prepocessing(flickr_sentences, sep='-'):
    """
    This pre-processing function unifies 'phrase_type' entries when there is more than one.
    Note that in order to increase performance, we apply this function "inplace".

    An example is given by sentence 48296285.txt line 0: the phrase 'a lab' has
    'phrase_type': ['animals', 'scene']. In this case this function returns:
    'phrase_type': 'animals-scene'.

    :param flickr_sentences: Flickr30k sentences.
    :param sep: separator to use when merging types.
    :return: reference to the same data given in input, which is modified "inplace"
        for performance reasons.
    """
    for sentence in flickr_sentences:
        for phrase in sentence['phrases']:
            if len(phrase['phrase_type']) == 1:
                current = phrase['phrase_type'][0]
            else:
                current = sep.join(phrase['phrase_type'])
            phrase['phrase_type'] = current
    return flickr_sentences
def words(line):
    """Splits a line of text into tokens."""
    return line.strip().split()
def similarity(item, user, sim_dict):
    """Similarity between an item and a user (a set of items)."""
    if user not in sim_dict or item not in sim_dict[user]:
        return 0
    else:
        return sim_dict[user][item]
def change_to_id(obj):
    """Change the key named 'uuid' to 'id'.

    Zun returns objects with a field called 'uuid'; many of Horizon's
    directives however expect objects to have a field called 'id'.
    """
    obj['id'] = obj.pop('uuid')
    return obj
def find_n_max_vals(list_, num):
    """Searches for the num biggest values of a given list of numbers.

    Returns the num maxima and their indices, wrapped up in a list.
    """
    li_ = list_.copy()
    max_vals = []  # the values
    max_ind = []   # the index of the value, can be used to get the param
    while num > 0:
        max_val = max(li_)
        max_id = li_.index(max_val)
        max_vals.append(max_val)
        max_ind.append(max_id)
        li_[max_id] = float('-inf')  # mask the value instead of deleting, so indices stay valid
        num -= 1  # count down
    return [max_vals, max_ind]
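# A minimal usage sketch (not from the source) for find_n_max_vals with illustrative
# values: the two largest entries are 7 (at index 2) and 4 (at index 0).
assert find_n_max_vals([4, 1, 7, 3], 2) == [[7, 4], [2, 0]]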
def calculate_y_pos(x, centre=80):
    """Calculates the y-coordinate on a parabolic curve, given x."""
    y = 1 / centre * (x - centre) ** 2
    return int(y)