content
stringlengths
42
6.51k
def make_url(path, api=False):
    """Build a BiGG request URL.

    Args:
        path: Resource path; a leading '/' is tolerated.
        api: When True, target the v2 API endpoint instead of the website.

    Returns:
        Fully-qualified BiGG URL string.
    """
    template = 'http://bigg.ucsd.edu/api/v2/%s' if api else 'http://bigg.ucsd.edu/%s'
    return template % path.lstrip('/')
def make_fai_cmd(in_fa):
    """Return the samtools command line that builds a .fai index for *in_fa*."""
    cmd = 'samtools faidx %s' % in_fa
    return cmd
def get_section_link(header):
    """Build an RST section-anchor directive.

    Args:
        :header: (str) section header

    Returns:
        :rst: (str) RST label line followed by a blank line
    """
    label = header.lower().replace(' ', '_')
    return f".. _sec_mframwork_{label}:" + "\n\n"
def dSquaredLoss(a, y):
    """Gradient of the squared loss L = sum(0.5 * (y_i - a_i)^2) w.r.t. a.

    :param a: vector of activations output from the network
    :param y: the corresponding correct label
    :return: vector of partial derivatives dL/da_i = a_i - y_i
    """
    # d/da_i [0.5 * (y_i - a_i)^2] = a_i - y_i: the 1/2 cancels the exponent
    # and the inner derivative contributes the -1 that flips the sign.
    return a - y
def extract_file_paths(argv):
    """Split *argv* at the `--` end-of-options delimiter.

    File path arguments follow a `--` delimiter, if any.

    Args:
        argv: command-line argument list.

    Returns:
        (argv_without_files, input_files), where input_files is None when
        no `--` delimiter is present.
    """
    if "--" in argv:
        cut = argv.index("--")
        return argv[:cut], argv[cut + 1:]
    return argv, None
def SIMD_vectorization_latency_pruning(config):
    """Decide whether to stop exploring SIMD factors at the current stage.

    SIMD candidate loops are pre-sorted in descending factor order, so:
    if the last design evaluated is already slower than the optimum found
    so far (latency flagged as -1), no smaller factor can win — prune.
    If the last design is resource-legal, it achieves the least latency in
    this group and smaller factors cannot do better — prune.  Designs with
    equal latency but lower resource usage (comm-bound case) are
    deliberately ignored here.

    Returns:
        True to stop exploration for these loops, False to continue.
    """
    last = config["monitor"]["last_design"]
    # latency == -1 marks a design already slower than the optimum.
    if last["latency"] == -1:
        return True
    # A falsy resource record means the design is resource-legal.
    return not last["resource"]
def parse_input(text):
    """Parse undirected weighted edges from lines of the form ``A to B = W``.

    Args:
        text: multi-line input, one edge per line.

    Returns:
        List of (source, dest, weight) tuples with int weights.
    """
    edges = []
    for row in text.splitlines():
        tokens = row.split()
        # Token layout is: A(0) to(1) B(2) =(3) W(4)
        edges.append((tokens[0], tokens[2], int(tokens[4])))
    return edges
def _nb_subst_metric(seq1, seq2, subst_dict, as_similarity=False):
    """Similarity or distance between two pre-aligned, equal-length sequences.

    Operates on strings and a flat dict substitution matrix whose keys look
    like ``'a|b'``; both key orientations are tried before falling back to
    ``subst_dict['n|a']``.

    :param seq1: first aligned sequence
    :param seq2: second aligned sequence, same length as seq1
    :param subst_dict: substitution matrix as a dict
    :param as_similarity: return the raw similarity instead of a distance
    :return: similarity s12, or distance s11 + s22 - 2*s12
    """
    assert len(seq1) == len(seq2)

    def _score(sa, sb, subst):
        # Site-wise similarity between sa and sb using the matrix subst.
        total = 0.
        for ca, cb in zip(sa, sb):
            total += subst.get(ca + '|' + cb, subst.get(cb + '|' + ca, subst['n|a']))
        return total

    sim12 = _score(seq1, seq2, subst_dict)
    if as_similarity:
        return sim12
    sim11 = _score(seq1, seq1, subst_dict)
    sim22 = _score(seq2, seq2, subst_dict)
    return sim11 + sim22 - 2 * sim12
def sfc_lw_cld(lwup_sfc, lwup_sfc_clr, lwdn_sfc, lwdn_sfc_clr):
    """Cloudy-sky surface net longwave radiative flux into the atmosphere.

    Computed as all-sky minus clear-sky (up minus down) fluxes; the exact
    operand order is preserved for floating-point reproducibility.
    """
    return lwup_sfc - lwup_sfc_clr - lwdn_sfc + lwdn_sfc_clr
def class_to_dict(inst, ignore_list=None, attr_prefix=''):
    """Snapshot the state of a class instance as a dict.

    Includes both attributes and properties (methods labeled with
    @property).  Because this captures properties, it should be viewed as
    a snapshot of instance state at call time.

    :param inst: instance to represent as a dict
    :param ignore_list: attribute names to omit (default: none; the old
        mutable-[] default was replaced to avoid the shared-default pitfall)
    :param attr_prefix: string prepended to every output key
    :return: dict of state, plus ``attr_prefix + 'class'`` -> class name
    """
    if ignore_list is None:
        ignore_list = []
    output = vars(inst).copy()  # captures regular instance variables
    cls = type(inst)
    # Collect property values by calling each property's fget.
    for name in dir(cls):
        member = getattr(cls, name)
        if isinstance(member, property):
            output[name] = member.fget(inst)
    # Drop ignored keys; prefix the rest (pop/re-insert keeps '' a no-op).
    for key in list(output.keys()):
        if key in ignore_list:
            del output[key]
        else:
            output[attr_prefix + key] = output.pop(key)
    output[attr_prefix + 'class'] = cls.__name__
    return output
def sample_to_plane(sample_orientation: str):
    """Map a sample orientation code to its Cartesian circle plane type.

    Args:
        sample_orientation: one of S1-S6 or S1R-S3R.

    Returns:
        Plane name, e.g. 'YZ' for 'S1'.

    Raises:
        KeyError: for an unknown orientation code.
    """
    planes = {
        'S1': 'YZ', 'S2': 'XZ', 'S3': 'XY',
        'S1R': 'ZY', 'S2R': 'ZX', 'S3R': 'YX',
        'S4': 'YX to YZ', 'S5': 'XY to XZ', 'S6': 'XZ to YZ',
    }
    return planes[sample_orientation]
def find_keys_with_duplicate_values(ini_dict, value_to_key_function):
    """Flip a dict, grouping keys that share the same (transformed) value.

    Parameters
    ----------
    ini_dict : dict
        Input mapping.
    value_to_key_function : function
        Transforms each value into something hashable; e.g. for a list of
        int values use ``lambda x: int("".join(str(i) for i in x))``.

    Returns
    -------
    dict
        Flipped dictionary: transformed value -> list of original keys.
    """
    flipped = {}
    for key, raw_value in ini_dict.items():
        hashable = value_to_key_function(raw_value)
        flipped.setdefault(hashable, []).append(key)
    return flipped
def calculate_federal_income_tax(taxable_income):
    """Returns the federal income tax.

    Applies the 2019 single-filer marginal brackets in ascending order:
    the full width of every bracket below the income is taxed at that
    bracket's rate, and the remainder at the rate of the bracket the
    income falls in.  Terms accumulate in the same order as the original
    chained expressions, so floating-point results are identical.
    """
    # (lower bound, upper bound, marginal rate) per bracket.
    brackets = (
        (0, 9700, 0.1),
        (9700, 39475, 0.12),
        (39475, 84200, 0.22),
        (84200, 160725, 0.24),
        (160725, 204100, 0.32),
        (204100, 510300, 0.35),
        (510300, float('inf'), 0.37),
    )
    fed_tax = 0
    for lower, upper, rate in brackets:
        if taxable_income > upper:
            fed_tax += (upper - lower) * rate
        else:
            fed_tax += (taxable_income - lower) * rate
            break
    return fed_tax
def convertTime(time):
    """Converts any entered time to seconds.

    Supported unit suffixes: s, m, h, d, mth, y.  The previous version
    looked only at the final character, so the multi-character 'mth'
    suffix could never match (its trailing 'h' then failed int parsing).

    :param time: numeric value immediately followed by a unit suffix
    :return: duration in seconds, or -1 if the string cannot be parsed
    """
    time_dict = {"s": 1, "m": 60, "h": 3600, "d": 3600*24,
                 "mth": 3600*24*30, "y": 3600*24*30*365}
    # NOTE(review): the 'y' factor (30*365 days) looks wrong but is kept
    # as-is to preserve existing behaviour — confirm the intended value.
    # Try the longest suffix first so 'mth' is not read as hours.
    for unit in sorted(time_dict, key=len, reverse=True):
        if time.endswith(unit):
            try:
                val = int(time[:-len(unit)])
            except ValueError:
                return -1
            return val * time_dict[unit]
    return -1
def closest(arr, value):
    """Return the index of the last member of *arr* that is <= *value*.

    *value* may be a number or a numeric string using ',' or '.' as the
    decimal separator.

    :param arr: sequence assumed sorted ascending — TODO confirm callers
    :return: index into *arr*; -1 when *value* is None or every element
        exceeds *value* (the first match then yields index -1)
    """
    if value is None:  # identity test instead of '== None'
        return -1
    # Normalise once, outside the loop (the original re-converted on
    # every iteration): accept '3,14' as well as '3.14'.
    value = float(str(value).replace(',', '.'))
    for i, element in enumerate(arr):
        if element > value:
            return i - 1
    return len(arr) - 1
def extract_properties(data: dict) -> tuple:
    """Split svg-extracted data into contours, continents and priority.

    Args:
        data (dict): dictionary with svg extracted data; keys containing
            'contour' or 'color' select entries, plus a 'priority' key.

    Returns:
        tuple: (contours:list, continents:list, priority:int)
    """
    contours = [v for k, v in data.items() if 'contour' in k]
    continents = [v for k, v in data.items() if 'color' in k]
    return contours, continents, data['priority']
def hosts_to_endpoints(hosts, port=2181):
    """Parse a 'host[:port],...' string into a list of (host, port) tuples.

    Note: an explicit port stays a string (taken directly from the split),
    while entries without one get the *port* argument with its given type.

    :param hosts: comma-separated host[:port] list
    :param port: default port for entries without an explicit one
    :return: list of (host, port) tuples
    """
    return [
        tuple(entry.rsplit(":", 1)) if ":" in entry else (entry, port)
        for entry in hosts.split(",")
    ]
def always_str_list(list_or_str):
    """Makes sure list_or_str is always an iterable of str.

    Parameters
    ----------
    list_or_str: str or str iterable (tuple, list, ...)
        Parameter to wrap if it is a single str.

    Returns
    -------
    Iterable
        ``(list_or_str,)`` when a str is given, otherwise the input
        unchanged.
    """
    return (list_or_str,) if isinstance(list_or_str, str) else list_or_str
def get_message_payload(channel_id, msg):
    """Build the payload for the Slack post-message API.

    :param channel_id: destination channel id
    :param msg: a single attachment dict, wrapped in a list by this helper
    :return: payload dict ready to send
    """
    payload = {
        "ts": "",
        "channel": channel_id,
        "username": "Market Bot",
        "attachments": [msg],
    }
    return payload
def filter_tasks_for_slaves(slaves, tasks):
    """ Given a list of slaves and a list of tasks, return a filtered list of
    tasks, where those returned belong to slaves in the list of slaves

    :param slaves: the list of slaves which the tasks provided should be
                   running on.
    :param tasks: the tasks to filter
    :returns: a list of tasks, identical to that provided by the tasks param,
              but with only those where the task is running on one of the
              provided slaves included.
    """
    # A set makes each membership test O(1) instead of O(len(slaves)).
    slave_ids = {slave['id'] for slave in slaves}
    return [task for task in tasks if task['slave_id'] in slave_ids]
def fib(x):
    """Returns the xth term of the Fibonacci sequence (1, 1, 2, 3, 5, ...).

    :param x: 1-based index of the term; values < 1 also yield 1
    :return: the xth Fibonacci number

    The original also incremented x inside the loop; that statement had no
    effect (range() is evaluated once) and was removed as dead code.
    """
    a, b = 1, 1
    for _ in range(1, x):
        a, b = b, a + b
    return a
def solve_timestep_differencer(z_future, z_downstream, z_past, dt, dx, U, K, A, m, n):
    """Residual of the implicit stream-power transient equation.

    Discretises the system
        E = K A^m S^n,  dz/dt = U - E
    as
        0 = U - K A^m ((z_i^{j+1} - z_{i-1}^{j+1})/dx)^n - (z^{j+1} - z^j)/dt
    and returns the right-hand side, so a root-finding algorithm can solve
    for z_future (Newton's method when n = 1; toms748 otherwise — faster
    and guaranteed to converge, but needs 4-times differentiability).
    Upslope values of K, U, A are used for the discretisation.

    Args:
        z_future: trial elevation at the new time step
        z_downstream: downstream elevation at the new time step
        z_past: elevation at the previous time step
        dt, dx: time step and node spacing
        U, K, A, m, n: uplift, erodibility, drainage area and exponents

    Returns:
        The residual; zero when z_future satisfies the implicit scheme.

    Author: Simon M Mudd, 18/08/2020
    """
    slope_term = ((z_future - z_downstream) / dx) ** n
    return U - K * A**m * slope_term - (z_future - z_past) / dt
def cors(support="*", response=None, **kwargs):
    """Attach the Access-Control-Allow-Origin header to this endpoint.

    :param support: allowed origin value (default '*', any origin)
    :param response: response object exposing set_header(); skipped if falsy
    :return: the *support* value
    """
    if response:
        response.set_header("Access-Control-Allow-Origin", support)
    return support
def _align(value, alignment):
    """Round *value* up to the smallest multiple of *alignment* >= value."""
    # Ceiling division via negation keeps everything in integer arithmetic.
    return -(-value // alignment) * alignment
def get_parents_from_list_of_attributes(fields):
    """Returns a list of parent ids from a list of column 9 entries.

    The first field containing 'Parent' is split on '=' and its value split
    on ','; None is returned implicitly when no such field exists.
    """
    for attribute in fields:
        if "Parent" in attribute:
            return attribute.split("=")[1].split(",")
def coh_transform(coh, max_coh):
    """Rescale coherence from the 0-100 range onto -1..1.

    50 maps to 0 and *max_coh* maps to 1.
    """
    shifted = coh - 50
    span = max_coh - 50
    return shifted / span
def precision(actual, predicted, k=10):
    """Precision@k: fraction of the top-k predictions found in *actual*.

    :param actual: iterable of relevant items
    :param predicted: ranked predictions, best first
    :param k: cutoff rank; remains the divisor even when fewer predictions
        exist (slicing prevents the IndexError the old index loop raised
        when len(predicted) < k)
    :return: hits / k, or -1 when *actual* is empty
    """
    if not actual:
        return -1
    actual = set(actual)
    hit = sum(1. for p in predicted[:k] if p in actual)
    return hit / k
def __get_col_name(col):
    """
    Internal helper to get the actual name of a pandas DataFrame column
    from a label that may consist of an arbitrary sequence of tuples:
    unwraps tuples repeatedly, returning the last value of the innermost
    one (non-tuples are returned unchanged).
    """
    while isinstance(col, tuple):
        col = col[-1]
    return col
def percent_avoid_rounding_to_zero(numerator, denominator, default_decimals=1, max_decimals=2):
    """Calculates a percentage while avoiding a spurious rounding to 0.

    Starts at *default_decimals* decimal places and adds precision (up to
    *max_decimals*) while a nonzero ratio still rounds to 0.
    `default_decimals` and `max_decimals` should be >= 0 with
    `max_decimals >= default_decimals`.
    """
    # The ratio never changes, so compute it once outside the loop.
    ratio = float(numerator) / float(denominator) * 100
    decimals = default_decimals
    pct = round(ratio, decimals)
    while pct == 0 and numerator != 0 and decimals < max_decimals:
        decimals += 1
        pct = round(ratio, decimals)
    return pct
def getErrorMsg(error=None, detail=None):
    """Format an error message for the LogMapper agent menu.

    :param error: main error object/text, appended after 'ERROR: '
    :param detail: optional detail appended as ' Detail: ...'
    :return: formatted message, at minimum 'ERROR'
    """
    msg = "ERROR"
    if error:
        msg += ": " + str(error)
    if detail:
        msg += " Detail: " + str(detail)
    return msg
def findAlphabeticallyLastWord(text):
    """
    Given a string |text|, return the word in |text| that comes last
    alphabetically (that is, the word that would appear last in a
    dictionary).  A word is defined by a maximal sequence of characters
    without whitespaces.
    """
    # max() consumes the split words directly; the intermediate list
    # comprehension in the original was redundant.
    return max(text.split())
def format_cell(x):
    """Formats a cell value as an uppercase hex string, zero-padded to at
    least two digits for nice display."""
    digits = hex(x)[2:]
    if len(digits) < 2:
        digits = '0' + digits
    return digits.upper()
def get_frames_per_sample(fps, sample_time):
    """ Converts a specified sample_time from seconds to # of samples.

    Parameters
    ----------
    fps: float
        Video sampling rate
    sample_time: float
        Duration (in seconds) to sample

    Returns
    -------
    int
        The # of samples corresponding to sample_time; always odd, rounded
        up to odd (e.g. an exact 4 samples becomes 5) so the window can
        include both end points of the specified duration.
    """
    nominal = sample_time * fps
    half = round(nominal / 2)
    # round() already yields an int in Python 3 and 2*half + 1 is odd by
    # construction, so the old int() wrapper and assertion were redundant.
    return 2 * half + 1
def tplink_build_url(host, path):
    """ A helper function to build URL's for the device """
    return "http://%s/%s" % (host, path)
def generate_key(message: str, key: str) -> str:
    """Extend (or trim) *key* cyclically to the length of *message*.

    >>> generate_key("THE GERMAN ATTACK","SECRET")
    'SECRETSECRETSECRE'
    """
    target = len(message)
    # Repeat the key enough times, then cut to size.  Unlike the previous
    # grow-one-character loop, this also terminates when the key is
    # already longer than the message (the old loop never broke out).
    repeats = target // len(key) + 1
    return (key * repeats)[:target]
def isZeroRow(m, row):
    """ (int, [m]) => boolean

    Check whether the first m entries of row are all zero, using all()
    instead of the manual index loop.
    """
    return all(row[i] == 0 for i in range(m))
def isSubset(normalset1, normalset2):
    """Return True when every element of normalset1 occurs in normalset2.

    Replaces the manual loop (and stray semicolons) with all(); behaviour
    is unchanged, including the vacuous True for an empty first set.
    """
    return all(element in normalset2 for element in normalset1)
def __process_line(line, spaces, padding, left_justify):
    """ Core method to indent each line.

    :param line: raw input line
    :param spaces: width of the indent unit (and inter-word gap fallback)
    :param padding: column width each word is padded to
    :param left_justify: when True, a single-character line is emitted
        as-is (plus CRLF) instead of being padded
    :return: reformatted line terminated with CRLF
    """
    # Collapse runs of 3+ spaces down to 2 before any other processing.
    while (" " * 3) in line:
        line = line.replace((" " * 3), (" " * 2))
    temp = line.strip()
    if left_justify and len(temp) == 1:
        # Single-character lines are left-justified untouched.
        newline = temp + "\r\n"
    else:
        list_line = line.split(" ")
        newline = ""
        for i in range(len(list_line)):
            # Pad each word out to `padding` columns; words that are
            # already too wide get `spaces` trailing blanks instead.
            sp_remain = padding - len(list_line[i])
            if sp_remain < 1:
                newline += list_line[i] + (" " * spaces)
            else:
                newline += list_line[i] + (" " * sp_remain)
        newline = newline.strip() + "\r\n"
    # Preserve an original leading indent as one indent unit.
    if line.startswith(" "):
        newline = (" " * spaces) + newline
    return newline
def GetErrorsForTemplate(api_query):
    """Prepares and returns the template values for API Query error responses.

    Args:
        api_query: The API Query for which to prepare the errors template
            values.

    Returns:
        A dict with an 'errors' list of {'timestamp', 'content'} entries,
        or an empty dict when the query is falsy or has no errors.
    """
    if not (api_query and api_query.api_query_errors):
        return {}
    error_list = [
        {'timestamp': err.timestamp, 'content': err.content}
        for err in api_query.api_query_errors
    ]
    return {'errors': error_list}
def triangleNum(n):
    """Returns the nth triangle number, n*(n+1)/2.

    Uses integer floor division instead of int(float-division) so very
    large n keeps exact precision (floats lose integer accuracy past 2**53).
    """
    return n * (n + 1) // 2
def get_port_numbers(ports):
    """
    Get all port numbers from the response as one comma-separated string.

    :param ports: Port portion from response (iterable of dicts with a
        'number' key).
    :return: Comma-separated port numbers.
    """
    numbers = (str(entry['number']) for entry in ports)
    return ', '.join(numbers)
def str2fmap(line):
    """Convert a string like 'f1=v1 f2=v2' into a feature map
    {f1: float(v1), f2: float(v2)}."""
    fmap = {}
    for pair in line.split():
        feature, value = pair.split('=')
        fmap[feature] = float(value)
    return fmap
def yoToZA(yo_i):
    """Returns ZA designator, or special code, from yo id.  That is, ::

        yo_i = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16

    is converted to ::

        0, 1, 1001, 1002, 1003, 2003, 2004, 0, -8, -9, -10,
        1, 1001, 1002, 1003, 2003, 2004

    with meanings: none, n, p, d, t, He3, He, g, b+, b-, EC, n, p, d, t,
    He3, He — ids 11-16 repeat the particle codes of 1-6.  yo_i must be a
    python integer.

    Raises:
        Exception: for yo_i outside 0..16.
    """
    if not 0 <= yo_i <= 16:
        raise Exception( "\nError in yoToZA: unsupported yo value = %s" % repr(yo_i) )
    za_table = (0, 1, 1001, 1002, 1003, 2003, 2004, 0, -8, -9, -10)
    index = yo_i - 10 if yo_i > 10 else yo_i
    return za_table[index]
def close(conn):
    """This closes the database connection, best-effort.

    :param conn: object exposing close()
    :return: always True; close failures are deliberately ignored
    """
    try:
        conn.close()
    except Exception:
        # Narrowed from a bare except: still swallows close() failures,
        # but no longer traps KeyboardInterrupt/SystemExit.
        pass
    return True
def find_colours(shapes):
    """
    Takes a list of shapes with cells (y, x, colour) and finds their common
    and uncommon colours.  Note that uncommon colours are a nested list,
    where each nested list corresponds to the uncommon colours of a
    particular shape - different shapes could have different uncommon
    colours.

    >>> find_colours([[(0, 1, 4), (3, 5, 4), (9, 3, 1)], [(4, 11, 3), (5, 1, 3), (12, 19, 1)]])
    ([1], [[4], [3]])
    >>> find_colours([[(0, 1, 4), (3, 5, 8), (9, 3, 1)], [(4, 11, 3), (5, 1, 8), (12, 19, 1), (3, 1, 7)]])
    ([8, 1], [[4], [3, 7]])
    """
    # One colour-set per shape, built in cell order.
    colour_sets = [{colour for _, _, colour in shape} for shape in shapes]
    # Intersect successively, keeping the same order of set operations as
    # before so the resulting iteration order is unchanged.
    common = colour_sets[0]
    for colours in colour_sets:
        common = common.intersection(colours)
    # Per-shape difference against the common set.
    uncommon = [list(colours - common) for colours in colour_sets]
    return list(common), uncommon
def vecvecsum(vec1, vec2):
    """Elementwise sum: res[i] = vec1[i] + vec2[i]."""
    return list(map(lambda pair: pair[0] + pair[1], zip(vec1, vec2)))
def hexstring_to_wydelist(s):
    """Break a hexadecimal string representation into a list of wydes
    (4-character chunks).

    This could be factored into a hexstring_to_intlist function but it is
    useful standalone for exploring and debugging.
    """
    wyde = 4
    # A list comprehension already yields a list; the old wrapping list()
    # call was redundant.
    return [s[i:i + wyde] for i in range(0, len(s), wyde)]
def get_groups(lines):
    """
    Separates lines into groups of declarations, delimited by blank lines.

    Unlike the previous version, a trailing group not followed by a blank
    line is no longer silently dropped.

    :param lines: lines that were read from a file
    :return: list of groups, each a list of lines without trailing newlines
    """
    groups = []
    group = []
    for line in lines:
        # if empty line: close the current group (empty groups included,
        # matching the original behaviour for consecutive blank lines)
        if line == '\n':
            groups.append(group)
            group = []
        else:
            # remove \n from line and add to current group
            group.append(line.split('\n')[0])
    if group:
        # Flush the final group when the input lacks a terminating blank line.
        groups.append(group)
    return groups
def url_to_chat_id(slack_url):
    """Convert a copy-n-pasted slack chat URL to the chat_id

    Go from:  https://xyz.slack.com/archives/.../p1614771038052300
    To:       1614771038.052300
    """
    # Last non-empty path segment (tolerates a trailing '/').
    segments = [seg for seg in slack_url.split('/') if seg]
    if not segments:
        # empty string given, just return it
        return slack_url
    chat_id = segments[-1].lower().strip()
    # Drop the leading 'p', then re-insert the dot before the final six
    # digits to recover the timestamp form.
    if chat_id.startswith('p'):
        chat_id = chat_id[1:]
    return f"{chat_id[:-6]}.{chat_id[-6:]}"
def BoundingBox_collision(BB1, BB2):
    """ Return True if two N-D bounding boxes collide, False otherwise.

    Each box is a sequence of per-axis (min, max) intervals; the boxes
    collide only when the intervals overlap (touching counts) on every axis.
    """
    return all(b1[1] >= b2[0] and b2[1] >= b1[0] for b1, b2 in zip(BB1, BB2))
def solution1(A):  # O(N)
    """
    You are given a list A (eg. [1, 2, 3, 7, 1, 5]). Write a function to
    calculate the pivot of the array such that the left hand side = right
    hand side. In the example, pivot (P) = 3, because 1 + 2 + 3 = 1 + 5.
    Expected time complexity = O(N). Expected auxillary space complexity = O(N).

    >>> solution1([1, 2, 3, 7, 1, 5])
    3
    >>> solution1([2, 4, 5, 10, 1, 2, 4, 4])
    3
    >>> solution1([4, 5, 2, 2, 1])
    -1
    >>> solution1([10, 3, 7, 3])
    1
    >>> solution1([2, 8, 1, 10])
    2
    """
    remaining = sum(A)  # O(N)
    previous = 0        # element just left of the cursor
    for index, current in enumerate(A):  # O(N) total
        # Drop the pivot candidate and fold the previous element into the
        # left side; a zero remainder means both sides balance.
        remaining -= current + previous
        if remaining == 0:
            return index
        previous = current
    return -1
def newman_conway(num):
    """ Returns the first *num* Newman-Conway numbers as a space-separated
    string, where P(1) = P(2) = 1 and P(n) = P(P(n-1)) + P(n - P(n-1)).

    Time Complexity: O(n)
    Space Complexity: O(n)

    Raises:
        ValueError: when num < 1 (previously only num == 0 was rejected,
        so negative input silently produced '1 1').
    """
    if num < 1:
        raise ValueError
    if num == 1:
        return '1'
    sequence = [0, 1, 1]  # index 0 is padding so sequence[i] == P(i)
    for i in range(3, num + 1):
        prev = sequence[i - 1]
        sequence.append(sequence[prev] + sequence[i - prev])
    return ' '.join(str(number) for number in sequence[1:])
def normalize_description(description):
    """
    Normalizes a docstring into a single line.

    Parameters
    ----------
    description : `str` or `None`
        The docstring to clear.

    Returns
    -------
    cleared : `str` or `None`
        Stripped lines joined by single spaces.  If `description` was given
        as `None` or is detected as empty, returns `None`.
    """
    if description is None:
        return None
    stripped = [line.strip() for line in description.splitlines()]
    kept = [line for line in stripped if line]
    if not kept:
        return None
    return ' '.join(kept)
def bad_request(msg: str = '') -> bytes:
    """Send a 59 (bad request) response.

    :param msg: Error message to display to the user.
    :return: Bytes to be sent to the client.
    """
    response = f'59 {msg}\r\n'
    return response.encode()
def flatten(records, fld):
    """
    Flatten a recordset to a list of a particular field.

    :param records: the recordset to flatten (anything with values_list())
    :param fld: the field from the records to include in the list
    :return: a flat list; empty when *records* is falsy
    """
    if not records:
        return []
    flattened = []
    for fields in records.values_list(fld):
        flattened.extend(fields)
    return flattened
def eqva(a1, a2):
    """Modified np.array_equal: count elements of a1 that are missing
    from a2.

    Sorts both inputs in place (side effect preserved from the original).
    NOTE(review): the original computed swapped aliases a1_/a2_ so the
    shorter list would be scanned, but never used them; that dead code is
    removed here and the original behaviour (scan a1 against a2) is kept —
    confirm which was intended.

    :return: number of elements of a1 not found in a2
    """
    a1.sort()
    a2.sort()
    return sum(1 for element in a1 if element not in a2)
def distance(strand_a, strand_b):
    """Find the number of differences between two same-length strings
    (Hamming distance).

    Parameters
    ----------
    strand_a, strand_b : str

    Raises
    ------
    ValueError
        If the strands differ in length.
    """
    # Guard clause first, then a single zipped pass: no index arithmetic
    # and no repeated len() calls inside the loop.
    if len(strand_a) != len(strand_b):
        raise ValueError("length of two inputs should be the same.")
    return sum(1 for a, b in zip(strand_a, strand_b) if a != b)
def band_to_frequency(band):
    """ Nominal band center frequency in GHz from band code.

    Returns None for an unrecognised code.
    """
    frequencies = {
        "L": 1.7, "S": 2.3, "C": 5.0, "X": 8.45, "Ku": 15,
        "K": 22, "Ka": 34, "Q": 42, "W": 90,
    }
    return frequencies.get(band)
def partition_on(predicate, iterable, map_fn=None):
    """
    Partition an iterable into two lists on the return value of the
    predicate: truthy items go left, the rest go right.

    :param predicate: decides the side for each item
    :param iterable: items to partition
    :param map_fn: optional transform applied to each item before placement
    :return: (left, right) lists
    """
    if map_fn is None:
        map_fn = lambda item: item  # identity keeps a single loop below
    left, right = [], []
    for item in iterable:
        bucket = left if predicate(item) else right
        bucket.append(map_fn(item))
    return left, right
def wrap_in_list(maybe_list):
    """
    Return *maybe_list* itself when it already is a list (or anything
    derived from list); otherwise wrap it in a single-element list.

    The motivation is that some functions return either a single object
    (e.g., a dictionary) or a list of those objects; the return value of
    this function can be iterated over safely.

    N.B. This would not help for ensuring something is a list of lists.

    Args:
        maybe_list (obj): an object which may be a list

    Returns:
        either maybe_list if it is a list, or maybe_list wrapped in a list
    """
    return maybe_list if isinstance(maybe_list, list) else [maybe_list]
def parse_notifier_name(name):
    """Convert the name argument to a list of names.

    Examples
    --------
    >>> parse_notifier_name('a')
    ['a']
    >>> parse_notifier_name(['a','b'])
    ['a', 'b']
    >>> parse_notifier_name(None)
    ['anytrait']
    """
    if isinstance(name, str):
        return [name]
    if name is None:
        return ['anytrait']
    if isinstance(name, (list, tuple)):
        # Validate with a real exception: the original used assert, which
        # silently disappears under `python -O`.
        for n in name:
            if not isinstance(n, str):
                raise TypeError("names must be strings")
        return name
def _str(_bytes):
    """
    Attempt to decode a bytes object back to an ordinary Python string
    (utf-8).  Decoding cannot be guaranteed, so be careful.

    @rtype: str, but will return the original data if it can't be
    interpreted as utf-8
    """
    try:
        decoded = _bytes.decode("utf-8")
    except UnicodeDecodeError:
        return _bytes
    return decoded
def create_response(status, meta, path=""):
    """
    Build the tuple representing a gemini response.

    status: two-digit gemini status code
    meta: response info depending on status
    path: optional path to a file to include in the response body
    """
    return (status, meta, path)
def percentage(reviewed, voted):
    """Returns the percentage of voted contributions; 100.0 when nothing
    was reviewed."""
    try:
        ratio = 100.0 * voted / reviewed
    except ZeroDivisionError:
        return 100.0
    return ratio
def printPath(path):
    """Render a path (a list of nodes) as 'n0 --> n1 --> ...'.

    str.join replaces the manual index bookkeeping for the separator;
    an empty path yields ''.
    """
    return ' --> '.join(str(node) for node in path)
def get_common_lists(list1, list2):
    """Compare the items in two lists.

    Returns those items that appear in both lists as well as two lists
    containing the unique items from each original list.  For example,
    given {s,b,c,d,e,f} and {a,b,c,d,e,z}:

        both = {b,c,d,e}
        in list1 not list2 = {s,f}
        in list2 not list1 = {a,z}

    list1[in] first list
    list2[in] second list

    Returns three lists.
    """
    set1, set2 = set(list1), set(list2)
    shared = set1 & set2
    only_first = set1 - shared
    only_second = set2 - shared
    return (list(shared), list(only_first), list(only_second))
def order(sentence):
    """Return the words of *sentence* sorted by the number each contains.

    Example: given "tw2o o1ne" returns "o1ne tw2o".  A word is kept when
    it contains the digit 1..len(words); other words are dropped.
    """
    words = sentence.split(' ')
    ordered = [
        word
        for position in range(1, len(words) + 1)
        for word in words
        if str(position) in word
    ]
    return ' '.join(ordered)
def tile(i, j, level):
    """Coordinates of tile (i, j) at *level* in x, y space.

    Returns (x, y, side_length), where side_length is the tile extent at
    this zoom level.  Operations are kept in the original order for exact
    floating-point reproducibility.
    """
    pixels = 256
    base_resolution = 5 / pixels
    resolution = base_resolution / (2 ** level)
    side = resolution * pixels
    return i * side, j * side, side
def flatten_config(config):
    """Take a dict with ':'-separated keys and values or tuples of values,
    flattening to single key-value pairs.

    Example: _flatten_config({'a:b': (1, 2), 'c': 3}) ->
    {'a': 1, 'b': 2, 'c': 3}.  Raises AssertionError on duplicate keys.
    """
    flat = {}
    for joined_key, values in config.items():
        keys = joined_key.split(":")
        if len(keys) == 1:
            values = (values,)  # single key: treat the value as a 1-tuple
        for key, value in zip(keys, values):
            assert key not in flat, f"duplicate key '{key}'"
            flat[key] = value
    return flat
def log_simple(n, k):
    """ A function that simply finds how many k's n has as factors.
    For example 28 = 2 * 2 * 7, so log_simple(28, 2) counts 2 and
    log_simple(28, 7) counts 1.

    :return: tuple (n with all factors of k divided out, the count).
        Floor division keeps the remainder an int; the previous `/=`
        silently converted it to a float.
    """
    log_result = 0
    while n % k == 0:
        log_result += 1
        n //= k
    return n, log_result
def _is_single_iterator_type(iters):
    """Determine if there is a single or multiple type of iterator.

    If iters is [], this returns True: the null case is considered a
    single iterator type (vacuously homogeneous).
    """
    return not iters or len(set(iters)) == 1
def _validate(data, players):
    """Chat messages can be bugged - check for invalid messages.

    A message is invalid only when it names a player_number that does not
    belong to any known player.
    """
    if 'player_number' not in data:
        return True
    known_numbers = [p['number'] for p in players]
    return data['player_number'] in known_numbers
def size_unicode(arg):
    """Calculate the size in bytes of a unicode string when UTF-8 encoded."""
    encoded = arg.encode('utf-8')
    return len(encoded)
def release_notes_pg_minor_version(minor_version, major_version):
    """Formats the minor version number to the appropriate PostgreSQL
    version.  This is particularly for very old versions of PostgreSQL:
    for major 0 or 1, only characters [2:4] of the minor version string
    are kept; newer versions pass through unchanged.
    """
    if str(major_version) in ('0', '1'):
        return str(minor_version)[2:4]
    return minor_version
def remove_repeated_asn(path):
    """Remove consecutively repeated ASNs in the given path.

    Args:
        path (list of ASN): ASN can be int, or str for IXP hops.

    Returns:
        list of ASN with consecutive repeats collapsed.
    """
    deduped = []
    for hop in path:
        # Keep a hop unless it repeats the one just kept; since skipped
        # hops always equal their predecessor, this matches comparing
        # against path[idx-1].
        if not deduped or hop != deduped[-1]:
            deduped.append(hop)
    return deduped
def get_tax(base_cost: float, tax_rate: float) -> float:
    """Return the tax on base_cost at tax_rate percent."""
    percent_of_cost = base_cost / 100
    return percent_of_cost * tax_rate
def get_range(value):
    """
    Filter - returns a range made from the given value, from 1 up to and
    including *value*.

    Usage (in template):
    <ul>{% for i in 3|get_range %} <li>{{ i }}. Do something</li> {% endfor %}</ul>

    Results with the HTML:
    <ul> <li>1. Do something</li> <li>2. Do something</li> <li>3. Do something</li> </ul>

    Instead of 3 one may use the variable set in the views.

    (The previous docstring showed 0-based output, but the code has always
    produced range(1, value + 1); the example is corrected to match.)
    """
    return range(1, value + 1)
def attrs(m, first=[], underscores=False):
    """
    Given a mapping m, return a string listing its values in a key=value
    format.  Items with underscores are, by default, not listed.  If you
    want some things listed first, include them in the list first.
    """
    keys = list(first)
    keys.extend(
        k for k in m.keys()
        if (underscores or not k.startswith('_')) and k not in first
    )
    return ', '.join("{0}={1}".format(k, repr(m[k])) for k in keys)
def detect_compression_format(filename):
    """
    Attempts to detect compression format from the filename extension.
    Returns None if no format could be detected.
    """
    for extension, fmt in (('.bz2', "bz2"), ('.xz', "xz"), ('.gz', "gz")):
        if filename.endswith(extension):
            return fmt
    return None
def check_sign_projection_of_a_on_b(a, b):
    """
    The magnitude of the projection of a on b is (a \\dot b)/|b|.
    However, |b| is always >= 0, so the sign is determined solely by the
    sign of a \\dot b.

    Returns 1 for a positive projection, 0 otherwise.
    """
    dot = a[0] * b[0] + a[1] * b[1]
    return 1 if dot > 0 else 0
def patch_name_parts_limit(name_str, space_replacer=None):
    """
    Usage: par_name = patch_name_parts_limit(name_str, <space_replacer>)
    clean up name_str so it may be decoded with patch_name_to_dict and
    serve as a valid file name

    Args:
        name_str: string representation for case_id or class_label or file_extension
        space_replacer: python str to replace spaces

    Returns:
        part_name: name_str with spaces removed, reserved characters
            removed and underscores replaced with hyphens
    """
    # Substitute spaces first when a usable (str) replacer was supplied...
    if isinstance(space_replacer, str):
        name_str = name_str.replace(' ', space_replacer)
    # ...then drop any spaces that remain: no spaces allowed!
    name_str = name_str.replace(' ', '')
    # Strip filesystem-reserved characters.
    reserved_chars = '/\\?%*:|"<>'
    part_name = ''.join(ch for ch in name_str if ch not in reserved_chars)
    # Hyphens instead of underscores so x and y location can be decoded.
    return part_name.replace('_', '-')
def update_dic_dist(dic_dist, dic_xyz, d_real, d_approx, phase):
    """ For every annotation in a single image, update the final dictionary.

    In the 'train' phase, record keypoint heights; in the 'val' phase,
    bin the absolute distance error by the real distance.
    """
    if phase == 'train':
        # Update the dict with the heights metric for each keypoint.
        for part in ('head', 'shoulder', 'hip', 'ankle'):
            dic_dist['heights'][part].append(float(dic_xyz[part][0][1]))
    if phase == 'val':
        # Update the dict with distance metrics for the test phase.
        error = abs(d_real - d_approx)
        if d_real <= 10:
            bucket = '10'
        elif d_real <= 20:
            bucket = '20'
        elif d_real <= 30:
            bucket = '30'
        else:
            bucket = '>30'
        dic_dist['error'][bucket].append(error)
        dic_dist['error']['all'].append(error)
    return dic_dist
def add_edge_output_graph(edge_to_add, graph):
    """Insert an undirected weighted edge into an adjacency-list graph.

    ``edge_to_add`` has the form ({"node1", "node2"}, weight); for each
    endpoint the opposite endpoint is appended to its adjacency list, e.g.
    {"node1": [("node2", weight)], "node2": [("node1", weight)]}.
    Returns the (mutated) graph.
    """
    endpoints, weight = edge_to_add
    for node in endpoints:
        other = list(endpoints.difference({node}))[0]
        # Rebind with a fresh list (matches the original's non-in-place style).
        graph[node] = graph[node] + [(other, weight)]
    return graph
def vals_are_same(offset, *content):
    """Determine if the values at ``offset`` of all content arrays match.

    :param offset: index to compare across the arrays
    :param content: the content arrays
    :return: True if content[0][offset] == content[1][offset] == ...,
             False otherwise (vacuously True with no arrays)
    """
    arrays = iter(content)
    try:
        reference = next(arrays)[offset]
    except StopIteration:
        # No arrays supplied: nothing can differ.
        return True
    # all() short-circuits on the first mismatch, like the original loop.
    return all(arr[offset] == reference for arr in arrays)
def length_of_last_word(text):
    """ Given a string s consists of upper/lower-case alphabets and empty
        space characters ' ', return the length of last word in the string.
        If there is only one word, it is the last word.

        The string is traversed once, from the end. Note: the previous
        version reset the count to 0 whenever index 0 held a letter, so a
        single word ("Hello") returned 0, and any trailing space made it
        return 0 immediately; both are fixed here.

    Args:
        text (str): long string consisted of word(s)

    Return:
        length (int): length of the last word in the string (0 if none)
    """
    i = len(text) - 1

    # Skip trailing spaces so "Hello World  " still yields 5.
    while i >= 0 and text[i] == ' ':
        i -= 1

    # Count characters back to the preceding space (or the string start).
    length = 0
    while i >= 0 and text[i] != ' ':
        length += 1
        i -= 1

    return length
def format_offset(offset):
    """Return a right-aligned hexadecimal representation of offset.

    >>> format_offset(128)
    '    0080'
    >>> format_offset(3735928559)
    'deadbeef'
    """
    # High 5 hex digits are space-padded and the low 3 are zero-padded, so
    # small offsets right-align under large ones. The previous format string
    # prefixed '0x', contradicting its own doctests; the prefix is dropped.
    return '%5x%03x' % (offset >> 12, offset & 0xFFF)
def check_the_bucket(bucket):
    """Report whether 'gold' is in the bucket.

    Returns True if present, False otherwise.
    """
    return 'gold' in bucket
def max_sum_subaarry(a):
    """Kadane's algorithm.

    Returns (best_sum, best_slice): the maximum sum of any contiguous
    subarray of ``a`` and the slice achieving it. When every element is
    negative the running sum never exceeds 0, so the result is
    (0, a[0:1]) — the same as the original implementation.
    """
    best = 0
    running = 0
    best_start = 0
    best_end = 0
    window_start = 0
    for idx, value in enumerate(a):
        running += value
        if running < 0:
            # Current window can only hurt; restart just past this element.
            running = 0
            window_start = idx + 1
        if running > best:
            best = running
            best_start = window_start
            best_end = idx
    return best, a[best_start:best_end + 1]
def list_duplicates_of(seq, item):
    """Return [start, end] index pairs for each run of adjacent ``item`` in ``seq``.

    Example: seq=[1, 2, 2, 3, 2], item=2 -> [[1, 2], [4, 4]].
    Returns [] when ``item`` never occurs (previously this case raised
    IndexError, because the ValueError handler indexed the empty ``locs``).
    """
    start_at = -1
    locs = []          # every index where `item` occurs, in order
    sequences = []
    start_index = -1   # start index of the run currently being scanned
    while True:
        try:
            loc = seq.index(item, start_at + 1)
        except ValueError:
            # No more occurrences: close out the final run, if one exists.
            if locs:
                sequences.append([start_index, locs[-1]])
            break
        else:
            if not locs:
                # First occurrence opens the first run.
                start_index = loc
            elif (loc - locs[-1]) != 1:
                # Gap since the previous occurrence: close that run, open a new one.
                sequences.append([start_index, locs[-1]])
                start_index = loc
            locs.append(loc)
            start_at = loc
    return sequences
def update_file_list(file_content, tag_index, formated_informations):
    """Splice ``formated_informations`` over the tag span of ``file_content``.

    file_content : list holding the current file lines (mutated in place)
    tag_index : pair (start, end) of the inclusive span of tags to replace
    formated_informations : preformatted function info lines to insert

    Returns the mutated list.
    """
    start, end = tag_index[0], tag_index[1]
    # Inclusive replacement, hence end + 1 in the slice.
    file_content[start:end + 1] = formated_informations
    return file_content
def parsetagvalue(term):
    """Collect all tag/value attributes of one obo-file term into a dict.

    :param term: list of lines belonging to a single term from the obofile.
    :return: dict mapping tag -> list of values, or '' when the input is
             empty or does not start with an 'id: GO:' line (not a GO term).
    """
    # Reject anything that is not a GO term.
    if not term or not term[0].startswith('id: GO:'):
        return ''

    data = {}
    for line in term:
        # Split only on the first ': ' so values may themselves contain it.
        parts = line.split(': ', 1)
        data.setdefault(parts[0], []).append(parts[1])
    return data
def search(inp, neuron_labels):
    """ Utility function to retrieve neuron names whose label contains a keyword.

    # Arguments
        inp (str): Keyword to use for filtering.
        neuron_labels (dict): Dictionary mapping name -> label string, for
            example generated with load_normalized_hemibrain.

    # Returns
        list: names whose label contains ``inp``
    """
    matches = []
    for name, label in neuron_labels.items():
        if inp in label:
            matches.append(name)
    return matches
def parse_job_line(line):
    """Parse one comma-separated cron-job line into a 4-tuple.

    The optional fourth field (script path) defaults to 'collect.py'.

    >>> parse_job_line("* * * *,myquery,mycredentials\\n")
    ('* * * *', 'myquery', 'mycredentials', 'collect.py')
    >>> parse_job_line("* * * *,myquery,mycredentials,scripts/foo.py\\n")
    ('* * * *', 'myquery', 'mycredentials', 'scripts/foo.py')
    """
    fields = line.strip().split(',')
    if len(fields) == 3:
        fields = fields + ['collect.py']
    return tuple(fields)
def convert_to_gbit(value):
    """Convert a byte count to gigabits.

    Args:
        value: value in bytes to be converted to Gbits
    """
    # Same division order as before (three successive /1024.) so the float
    # result stays bit-identical to the original implementation.
    gib = value / 1024. / 1024. / 1024.
    return gib * 8
def minmax(data):
    """Solution to exercise R-1.3.

    Takes a sequence of one or more numbers and returns the smallest and
    largest, as a tuple of length two, without using the built-in
    functions min or max.
    """
    smallest = data[0]
    largest = data[0]
    for value in data:
        if value > largest:
            largest = value
        if value < smallest:
            smallest = value
    return (smallest, largest)
def diff1(arr):
    """kevin.du on CodeWars solutions.

    Return the element of ``arr`` (strings holding numeric literals) whose
    numeric value has the largest absolute magnitude. Ties keep the first
    such element. Returns the int 0 when ``arr`` is empty or every value
    evaluates to zero (preserving the original behaviour).
    """
    # SECURITY: the original used eval(), which executes arbitrary code from
    # the input strings. ast.literal_eval only accepts Python literals
    # (e.g. '3', '-5', '2.5'), which is all this function needs.
    import ast

    best = 0        # int 0 sentinel, returned when nothing beats magnitude 0
    best_abs = 0
    for item in arr:
        magnitude = abs(ast.literal_eval(item))
        if magnitude > best_abs:
            best = item
            best_abs = magnitude
    return best
def get_formal_writers(writers_string):
    """Normalize a comma-separated writers credit.

    Returns 'Lennon-McCartney' if either of them appears anywhere; a lone
    'Starkey' or 'Harrison' first credit wins outright; otherwise a later
    Harrison/Starkey mention is honoured, and anything else is returned
    unchanged. The check order mirrors the original implementation.
    """
    lead_writer = writers_string.split(',')[0]
    if lead_writer == 'Starkey':
        return 'Starkey'
    if lead_writer == 'Harrison':
        return 'Harrison'
    for beatle in ('Lennon', 'McCartney'):
        if beatle in writers_string:
            return 'Lennon-McCartney'
    if 'Harrison' in writers_string:
        return 'Harrison'
    if 'Starkey' in writers_string:
        return 'Starkey'
    return writers_string
def jaccard(setA, setB):
    """Jaccard Index: |A ∩ B| / |A ∪ B| of two iterables.

    Both inputs are coerced to sets. When both are empty the sets are
    (vacuously) identical, so 1.0 is returned — the naive formula raised
    ZeroDivisionError in that case.
    """
    setA, setB = set(setA), set(setB)
    union = setA | setB
    if not union:
        # Two empty sets are identical: full similarity by convention.
        return 1.0
    return len(setA & setB) / len(union)
def tail_avg(timeseries):
    """Average of the last three datapoint values in the series.

    Used as a measure instead of just the last datapoint: it reduces
    noise, at the cost of sensitivity and detection delay. When fewer
    than three datapoints exist, falls back to the last datapoint's value.
    """
    try:
        last_three = (timeseries[-1][1], timeseries[-2][1], timeseries[-3][1])
    except IndexError:
        # Fewer than three points: use the most recent value as-is.
        return timeseries[-1][1]
    return (last_three[0] + last_three[1] + last_three[2]) / 3
def is_even(number: int):
    """Return True when ``number`` is evenly divisible by two, else False."""
    _, remainder = divmod(number, 2)
    return remainder == 0