content
stringlengths
42
6.51k
def make_label(label_text):
    """Build a Gmail-API-style label payload for the given name."""
    return {
        'messageListVisibility': 'show',
        'name': label_text,
        'labelListVisibility': 'labelShow',
    }
def sort_mappers(classes):
    """Order SQLAlchemy mapper classes so inserts satisfy FK constraints.

    :param classes: mapping of class name -> mapped class; each class must
        expose ``__table__.foreign_key_constraints``.
    :return: list of class names in a foreign-key-safe insert order.

    BUGFIX: the original deleted entries from the dict while iterating
    ``classlist.items()`` directly, which raises RuntimeError on Python 3;
    we now sweep over a snapshot (``list(...)``).
    """
    order = []
    # work on a copy so the caller's mapping survives
    classlist = dict(classes)
    # SQLAlchemy's internal registry entry is not a mapped class; drop it
    classlist.pop('_sa_module_registry', None)
    # Tables may appear before the tables their FKs reference, so keep
    # sweeping until everything has been placed.
    # XXX: circular dependencies would loop forever here.
    while classlist:
        for name, cls in list(classlist.items()):
            fkcs = cls.__table__.foreign_key_constraints
            if not fkcs:
                # no FK constraints: safe to insert at any time
                order.append(name)
                del classlist[name]
                continue
            foreign_tables = [fkc.referred_table.name for fkc in fkcs]
            # a self-referential FK never blocks insertion
            if name in foreign_tables:
                foreign_tables.remove(name)
            # safe once every referenced table is already queued
            if set(foreign_tables).issubset(order):
                order.append(name)
                del classlist[name]
    return order
def permute_point(p, permutation=None):
    """Reorder the coordinates of *p* according to *permutation*.

    *permutation* is a digit string: "012" is the identity, "120" rotates
    counterclockwise, "201" rotates clockwise.  ``None``/empty leaves the
    point unchanged.
    """
    if not permutation:
        return p
    reordered = []
    for axis in range(len(p)):
        reordered.append(p[int(permutation[axis])])
    return reordered
def pack_numbers(numbers):
    """Greedily pack *numbers* into consecutive bins summing to at most 100."""
    bins = [[]]
    for value in numbers:
        current = bins[-1]
        # start a fresh bin when this value would overflow the current one
        if sum(current) + value > 100:
            current = []
            bins.append(current)
        current.append(value)
    return bins
def is_number(s):
    """Return True when *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def to_bytes(arg):
    """Coerce *arg* (iterable of ints, buffer, or py2 str) to ``bytes``."""
    # bytearray accepts all the input shapes we care about; bytes() of it
    # gives an immutable copy on both Python 2 and 3
    return bytes(bytearray(arg))
def restructure_day_array(day):
    """Collapse a day's 1-hour blocks into a (starttime, endtime) pair.

    *day* is a list of ``{'starttime': ..., 'endtime': ...}`` dicts; an
    empty/falsy day yields ``(-1, -1)`` so the profile form can show "none".
    """
    if not day:
        return -1, -1
    first_block, last_block = day[0], day[-1]
    return first_block['starttime'], last_block['endtime']
def extract_token(auth_response):
    """Pull the ASCII-encoded token id out of a decoded auth response.

    :param dict auth_response: decoded response from the authentication API.
    :rtype: bytes
    """
    token = auth_response['access']['token']
    return token['id'].encode('ascii')
def stomp_subscribe(topic):
    """Return JS that subscribes to *topic* (a string or list of strings)."""
    template = "stomp.subscribe('%s');"
    if isinstance(topic, list):
        return ''.join(template % t for t in topic)
    return template % topic
def chunk_string(in_s, n):
    """Split *in_s* into backslash-continued lines of at most *n* chars."""
    pieces = [in_s[start:start + n] for start in range(0, len(in_s), n)]
    joined = "\n".join("{}\\".format(piece) for piece in pieces)
    # the final line carries no continuation backslash
    return joined.rstrip("\\")
def list_to_str(value):
    """Join a list of strings with ';'; pass non-joinable values through.

    The TypeError fallback keeps NaN (and other non-iterables) unchanged.
    """
    try:
        return ';'.join(value)
    except TypeError:
        return value
def flattening(rad):
    """Polar flattening of a spheroid: (a - c) / a.

    Arguments
    ---------
    rad : dict {'a', 'b', 'c'}
        Equatorial1, Equatorial2, and polar radius.  Only 'a' and 'c'
        enter the flattening; 'b' is no longer required (the original
        read it into an unused local).
    """
    a = rad['a']
    c = rad['c']
    return (a - c) / a
def check_one_liners(docstring, context, is_script):
    """One-liner docstrings should fit on one line with quotes.

    Returns True when *docstring* spans several lines but only one of
    them carries alphabetic text; returns None otherwise.  *context* and
    *is_script* are part of the checker API and unused here.
    """
    if not docstring:
        return
    lines = docstring.split('\n')
    if len(lines) <= 1:
        return
    textual = [line for line in lines if any(ch.isalpha() for ch in line)]
    if len(textual) == 1:
        return True
def ConvertToABMag(mag: float, band: str) -> float:
    """Convert a Johnson magnitude to the AB system.

    Args:
        mag: The magnitude to convert.
        band: The band of the magnitude ('V' or 'R').

    Returns:
        The magnitude converted to the AB system.

    Raises:
        ValueError: If the band is not 'R' or 'V'.
    """
    offsets = {"V": -0.044, "R": 0.055}
    if band not in offsets:
        raise ValueError(f"Band {band} cannot be converted to AB.")
    return mag + offsets[band]
def prox_laplacian(a, lamda):
    """Proximal operator for the squared-l2 (Laplacian) regulariser."""
    denom = 1 + 2.0 * lamda
    return a / denom
def replace_invalid_path_chars(path, replacement='-'):
    """Return *path* with characters invalid in file paths swapped for
    *replacement*.
    """
    invalid = set('\\/"?<>|*:-')
    return ''.join(replacement if ch in invalid else ch for ch in path)
def _obtain_input_shape(input_shape, default_size, min_size, data_format): """Internal utility to compute/validate an ImageNet model's input shape. # Arguments input_shape: either None (will return the default network input shape), or a user-provided shape to be validated. default_size: default input width/height for the model. min_size: minimum input width/height accepted by the model. data_format: image data format to use. include_top: whether the model is expected to be linked to a classifier via a Flatten layer. # Returns An integer shape tuple (may include None entries). # Raises ValueError: in case of invalid argument values. """ if data_format == 'channels_first': default_shape = (3, default_size, default_size) else: default_shape = (default_size, default_size, 3) if data_format == 'channels_first': if input_shape is not None: if len(input_shape) != 3: raise ValueError('`input_shape` must be a tuple of three integers.') if input_shape[0] != 3: raise ValueError('The input must have 3 channels; got `input_shape=' + str(input_shape) + '`') if ((input_shape[1] is not None and input_shape[1] < min_size) or (input_shape[2] is not None and input_shape[2] < min_size)): raise ValueError('Input size must be at least ' + str(min_size) + 'x' + str(min_size) + ', got `input_shape=' + str(input_shape) + '`') else: input_shape = (3, None, None) else: if input_shape is not None: if len(input_shape) != 3: raise ValueError('`input_shape` must be a tuple of three integers.') if input_shape[-1] != 3: raise ValueError('The input must have 3 channels; got `input_shape=' + str(input_shape) + '`') if ((input_shape[0] is not None and input_shape[0] < min_size) or (input_shape[1] is not None and input_shape[1] < min_size)): raise ValueError('Input size must be at least ' + str(min_size) + 'x' + str(min_size) + ', got `input_shape=' + str(input_shape) + '`') else: input_shape = (None, None, 3) return input_shape
def top_files(query, files, idfs, n):
    """Return the filenames of the `n` top files matching `query` by tf-idf.

    :param query: set of query words
    :param files: dict mapping filenames to lists of their words
    :param idfs: dict mapping words to IDF values
    :param n: number of filenames to return
    """
    def tfidf_score(filename):
        words = files[filename]
        return sum(words.count(term) * idfs[term] for term in query)

    scores = {filename: tfidf_score(filename) for filename in files}
    ranked = sorted(scores, key=scores.get, reverse=True)
    return ranked[:n]
def get_cell_description(cell_input):
    """Return a short description for a notebook cell.

    The description is the cleaned-up first line of the cell when it looks
    like a single-line docstring, a single-line comment, or a function
    definition; otherwise "no description".

    BUGFIX: narrowed the original bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) to AttributeError, the only error a
    non-string cell_input actually raises here.
    """
    try:
        first_line = cell_input.split("\n")[0]
        if first_line.startswith(('"', '#', 'def')):
            return (first_line.replace('"', '')
                              .replace("#", '')
                              .replace('def ', '')
                              .replace("_", " ")
                              .strip())
    except AttributeError:
        # cell_input was not a string (e.g. None)
        pass
    return "no description"
def search_codetree_hasleftsub(tword, codetree):
    """Walk *codetree* along the symbols of *tword*.

    Each node maps a symbol to ``[value, subtree]``.  Returns the first
    non-zero value met on the path ("stored with non-zero value in any
    except the terminal node"); 0 when the path leaves the tree or the
    word ends without a hit.
    """
    pos = 0
    node = codetree
    while True:
        symbol = tword[pos]
        if symbol not in node:
            return 0
        entry = node[symbol]
        if entry[0] > 0:
            return entry[0]
        if pos == len(tword) - 1:
            return 0
        node = entry[1]
        pos += 1
def indexof(needle, haystack):
    """Return the index of *needle* in *haystack* by identity (``is``).

    Unlike ``list.index``, which compares by equality, this matches only
    the exact same object (pointer comparison).

    Raises ValueError when no element of *haystack* is *needle*.
    """
    for position, candidate in enumerate(haystack):
        if candidate is needle:
            return position
    raise ValueError("{!r} is not in {!r}".format(needle, haystack))
def factorial(n):
    """Return n! for a non-negative integer n.

    Examples: factorial(5) == 120; factorial(0) == 1 (by definition).
    Side effects: none.
    """
    # The assignment requires an explicit  for ... in range(...):  loop.
    product = 1
    for factor in range(2, n + 1):
        product = product * factor
    return product
def drop_duplicates(bq_client, table_id, field_name, ids):
    """Delete rows whose *field_name* is in *ids*, so re-inserted rows
    do not create duplicates.

    bq_client: BigQuery Client
    table_id: Table to affect
    field_name: name of the "primary_key" field
    ids: list of ids to drop
    return: query result on success, or a message when *ids* is empty
    """
    if not ids:
        return 'No ids to remove'
    try:
        # wrap each id in double quotes for the SQL IN (...) list
        quotation_ids = ['"' +s + '"' for s in ids]
    except TypeError:
        # ids were not strings (e.g. ints) -- stringify them first
        quotation_ids = ['"' +s + '"' for s in list(map(str, ids))]
    collapsed_ids = ', '.join(map(str, quotation_ids))
    # NOTE(review): ids are interpolated straight into the SQL text; fine
    # for trusted ids, but BigQuery query parameters would be safer --
    # confirm where ids come from.  (BigQuery DML allows DELETE without
    # FROM: "DELETE `table` WHERE ...".)
    query = f"""DELETE `{table_id}` WHERE {field_name} in ({collapsed_ids})"""
    query_job = bq_client.query(query)
    return query_job.result()
def get_from_dict(dictionary, key, default_value):
    """Return dictionary[key], or *default_value* when the key is absent
    or the dictionary itself is falsy/None.

    :param dictionary: dict to look the key up in
    :param key: key whose value we want
    :param default_value: returned when the lookup cannot be made
    """
    if not dictionary:
        return default_value
    return dictionary.get(key, default_value)
def f(n):
    """Return n! recursively, for integer n >= 0."""
    # Base case must return 1 (not 0): it multiplies every earlier frame,
    # so returning 0 would zero out the whole product.
    return 1 if n == 0 else n * f(n - 1)
def ros_publish_cmd(topic, _msg, _id=None):
    """Create a rosbridge publish command object.

    :param topic: string name of the topic to publish to
    :param _msg: ROS msg as dict
    :param _id: optional command id (included only when truthy)
    """
    command = {"op": "publish", "topic": topic, "msg": _msg}
    if _id:
        command["id"] = _id
    return command
def jp_server_config(jp_unix_socket_file):
    """Configure the serverapp fixture to listen on the given unix socket."""
    server_app = {
        "sock": jp_unix_socket_file,
        "allow_remote_access": True,
    }
    return {"ServerApp": server_app}
def annuity(A, r, g, t):
    """Present value of a level (ordinary) annuity.

    A = annual payment
    r = discount rate per period
    g = growth rate -- NOTE(review): accepted but never used; the formula
        below is the level-annuity PV.  A growing annuity would be
        (A/(r-g)) * (1 - ((1+g)/(1+r))**t) -- confirm intent with callers
        before changing.
    t = time periods
    returns present value
    """
    return((A/r) * (1 - 1/((1+r)**t)))
def build_histogram(text):
    """Map each lower-cased word in *text* to its number of occurrences."""
    histogram = {}
    for token in text.split():
        token = token.lower()
        histogram[token] = histogram.get(token, 0) + 1
    return histogram
def format_baseline_list(baseline_list):
    """Format the list of baseline information from the loaded files into
    a cohesive, informative string

    Parameters
    ------------
    baseline_list : (list)
        List of strings specifying the baseline information for each
        SuperMAG file

    Returns
    ---------
    base_string : (str)
        Single string containing the relevent data
    """
    # Accumulate the file dates seen for each unique baseline method and
    # each unique delta method; each line is "<base> <delta> <date words>".
    uniq_base = dict()
    uniq_delta = dict()
    for bline in baseline_list:
        bsplit = bline.split()
        bdate = " ".join(bsplit[2:])
        if bsplit[0] not in uniq_base.keys():
            uniq_base[bsplit[0]] = ""
        if bsplit[1] not in uniq_delta.keys():
            uniq_delta[bsplit[1]] = ""
        uniq_base[bsplit[0]] += "{:s}, ".format(bdate)
        uniq_delta[bsplit[1]] += "{:s}, ".format(bdate)
    # One method: report it directly; several: list each with its dates
    # ([:-2] trims the trailing ", ").
    if len(uniq_base.items()) == 1:
        base_string = "Baseline {:s}".format(list(uniq_base.keys())[0])
    else:
        base_string = "Baseline "
        for i, kk in enumerate(uniq_base.keys()):
            if i == 1:
                base_string += "{:s}: {:s}".format(kk, uniq_base[kk][:-2])
            else:
                base_string += " {:s}: {:s}".format(kk, uniq_base[kk][:-2])
        else:
            # NOTE(review): this is a for/else -- with no break in the
            # loop it ALWAYS runs, so "unknown" is appended even when
            # methods were listed.  Presumably only meant for the empty
            # case; confirm before changing.
            base_string += "unknown"
    # Same layout for the delta information.
    if len(uniq_delta.items()) == 1:
        base_string += "\nDelta {:s}".format(list(uniq_delta.keys())[0])
    else:
        base_string += "\nDelta "
        for i, kk in enumerate(uniq_delta.keys()):
            if i == 1:
                base_string += "{:s}: {:s}".format(kk, uniq_delta[kk][:-2])
            else:
                base_string += " {:s}: {:s}".format(kk, uniq_delta[kk][:-2])
        else:
            # NOTE(review): same always-firing for/else as above.
            base_string += "unknown"
    return base_string
def checkInstance(ids, claims):
    """Check the value of an instance (example: Q5) of an item.

    @param ids: list|string of ids
    @param claims: pywikibot.page._collections.ClaimCollection
    @return bool: True when the first P31 claim's numeric id is in *ids*
    """
    if not isinstance(ids, list):
        ids = [ids]
    wanted = [int(qid.lstrip('Q')) for qid in ids]
    P31 = claims['P31'] if 'P31' in claims else []
    if not P31:
        return False
    instance = P31[0].toJSON()
    item = instance['mainsnak']['datavalue']['value']
    return int(item.get('numeric-id')) in wanted
def check_sample_filters(sample, project):
    """Decide whether a biosample record passes the hardcoded filters.

    :param sample: metadata dict; the keys read depend on *project*
        (ENCODE: 'biosample_type', 'biosample_term_name', optional
        'product_id'/'life_stage'; Roadmap: 'TYPE', 'EDACC_NAME',
        'STD_NAME', optional 'AGE').
    :param project: 'ENCODE' or 'Roadmap Epigenomics'
    :return: True when the sample is kept, False when filtered out
    :raises ValueError: for any other project name
    """
    if project == 'ENCODE':
        bt = sample['biosample_type']
        # drop immortalized / in-vitro material
        if bt.startswith('immortal') or bt.startswith('in vitro'):
            return False
        btn = (sample['biosample_term_name']).lower()
        if btn == 'naive b cell':
            return False
        # blocklist of anatomical / derived-tissue terms
        if any([x in btn for x in ['right', 'left', 'female', 'fat', 'induced', 'inferior', 'superior', 'uterus', 'putamen', 'l1-s8', 'gland', 'testis', 'ovary', 'prostate', 'psoas', 'horn', 'pons', 'gyrus', 'nucleus', 'pedal', 'abdomen', 'amnion', 'chorion', 'amniotic', 'mononuclear', 'langerhans', 'cortex', 'cortical', 'occipital', 'pallidus', 'olfactory', 'hematopoietic', 'telencephalon']]):
            return False
        if 'fibroblast' in btn and 'product_id' in sample:
            # numerous fibroblast cell lines
            return False
        # ENCODE samples are kept ONLY when explicitly adult/unknown stage
        if 'life_stage' in sample:
            ls = (sample['life_stage']).lower()
            if any([x in ls for x in ['adult', 'unknown']]):
                return True
        return False
    elif project == 'Roadmap Epigenomics':
        bt = sample['TYPE']
        if bt in ['CellLine', 'ESCDerived']:
            return False
        # drop fetal / early-life samples when age information exists
        if 'AGE' in sample:
            age = (sample['AGE']).lower()
            if any([x in age for x in ['fetus', 'child', 'postnatal', '0y', 'newborn', 'fetal']]):
                return False
        edn = (sample['EDACC_NAME']).lower()
        if any([x in edn for x in ['fetal', 'stimulated', 'gm12878', 'k562', 'induced']]):
            return False
        std = (sample['STD_NAME']).lower()
        if any([x in std for x in ['ips', 'neurospheres', 'derived', 'variant', 'left', 'right', 'nuclei', 'psoas', 'gyrus', 'pedal', 'imr90', 'abdomen', 'chorion']]):
            return False
        # Roadmap samples default to kept once past the blocklists
        return True
    else:
        raise ValueError('Unknown project: {}'.format(project))
def validate_codons(starts, stops):
    """Check if codons are valid.

    Each codon must be exactly three characters drawn from A, C, T, G,
    and no codon may appear in both lists.  Problems are reported on
    stdout and make the function return False.

    BUGFIX: stop codons were previously only length-checked, never
    alphabet-checked; both lists now get the same validation.

    Parameters
    ----------
    starts : list
        List of start codons (None is treated as empty).
    stops : list
        List of stop codons (None is treated as empty).

    Returns
    -------
    bool
        True if codons are fine.
    """
    valid_bases = set('ACTG')
    starts = starts or []
    stops = stops or []
    # check lengths and alphabet for both lists
    for codon in starts:
        if len(codon) != 3 or not set(codon) <= valid_bases:
            print('Invalid start codon:' + codon)
            return False
    for codon in stops:
        if len(codon) != 3 or not set(codon) <= valid_bases:
            print('Invalid stop codon:' + codon)
            return False
    # check intersection
    for codon in starts:
        if codon in stops:
            print('Invalid codon in both lists:' + codon)
            return False
    return True
def map_keys_with_obj(f, dct):
    """Return a dict whose keys are f(key, value); values are unchanged.

    :param f: called with each key and value; returns the same key or a
        modified one
    :param dct: source mapping
    :return: dict with keys possibly modified but values unchanged
    """
    return {f(key, value): value for key, value in dct.items()}
def boolToInt(boolean):
    """Return 1 if *boolean* equals True, else 0."""
    # Comparing with == True (not mere truthiness) preserves the original
    # contract: only values equal to True (True, 1, 1.0) map to 1.
    return 1 if boolean == True else 0
def filter_comics(comics):
    """Keep only comics containing "new"; report how many were dropped.

    The criterion is hardcoded for now; on the long run it should come
    from command-line arguments.
    """
    comics = list(comics)
    kept = [comic for comic in comics if "new" in comic]
    if len(kept) != len(comics):
        print(
            "After filtering, %d out of %d comics were kept"
            % (len(kept), len(comics))
        )
    return kept
def cotain_chinese(text):
    """Return True when *text* contains at least one Chinese character.

    (The misspelled name -- "cotain" -- is kept for caller compatibility.)
    """
    # U+4E00..U+9FA5 spans the common CJK Unified Ideographs block.
    return any('\u4e00' <= char <= '\u9fa5' for char in text)
def extended_euclidean_algorithm(m, n):
    """Extended Euclidean Algorithm.

    Finds (a, b) such that a*m + b*n == gcd(m, n) (Bezout's identity).

    BUGFIX: the quotient is now computed with exact integer arithmetic
    (divmod / floor division); the original ``int(c / d)`` went through a
    float and silently corrupted results for operands beyond 2**53.
    """
    a, a_prime = 0, 1
    b, b_prime = 1, 0
    # Work on (c, d) with c >= d so the classic iteration applies;
    # remember the orientation to un-swap the coefficients at the end.
    if m > n:
        c, d = m, n
    else:
        c, d = n, m
    while True:
        q, r = divmod(c, d)
        if r == 0:
            break
        c, d = d, r
        # standard coefficient recurrences: new = prev - q * cur
        a, a_prime = a_prime - q * a, a
        b, b_prime = b_prime - q * b, b
    return (a, b) if m > n else (b, a)
def opposite_bearing(bearing1: float) -> float:
    """Return the opposite bearing, e.g. 90 -> 270.

    Parameters
    ----------
    bearing1 : float
        Bearing in degrees.

    Returns
    -------
    float
        Opposite bearing in degrees.
    """
    if bearing1 > 180:
        return bearing1 - 180
    return bearing1 + 180
def output(theta, final_list):
    """Split every side's coordinate triples into x, y and z lists.

    Input: theta (kept for testing; unused here), final sides dictionary
    whose values hold their coordinate list at index 2.
    Output: three lists of x, y, z values.
    """
    xs, ys, zs = [], [], []
    for side_key in final_list:
        for point in final_list[side_key][2]:
            xs.append(point[0])
            ys.append(point[1])
            zs.append(point[2])
    return xs, ys, zs
def get_business_obj_dict(business_obj_list):
    """Index business objects by their 'business_id' field.

    Key: business_id, Value: the business object itself.
    """
    return {obj['business_id']: obj for obj in business_obj_list}
def chunks(iterable: list, amount: int):
    """Split a list into *amount* roughly equal consecutive chunks.

    :param iterable: list of stuff to chunk
    :param amount: how many chunks
    :return: list of lists
    """
    chunk_size = len(iterable) / float(amount)
    result = []
    position = 0.0
    while position < len(iterable):
        result.append(iterable[int(position):int(position + chunk_size)])
        position += chunk_size
    return result
def lists_match(list_data, item_data):
    """True when the two list items share type, delimiter and bullet char.

    Used when agglomerating list items into lists.
    """
    compared_keys = ('type', 'delimiter', 'bullet_char')
    return all(list_data.get(key) == item_data.get(key)
               for key in compared_keys)
def getFuncName(sig):
    """Get the function name from a signature.

    Returns '' when *sig* does not look like a function (no '(').
    """
    sig = sig.strip()
    lparen = sig.rfind('(')
    if lparen < 0:
        return ""
    # the name is the last space-separated word before the '('
    head = sig[:lparen].rstrip(' ')
    return head[head.rfind(' ') + 1:]
def league_stats(n_clicks):
    """Return the league-stats heading, or None before any click.

    Initial test of how updating the tab h2 element works.

    :param n_clicks: click counter from the UI callback
    :return: heading string or None
    """
    if n_clicks == 0:
        return None
    return "Overall League Statistics."
def trapezoid_area(base_minor, base_major, height):
    """Return the area of a trapezoid: (b1 + b2) / 2 * h."""
    mean_base = (base_minor + base_major) / 2
    return mean_base * height
def get_possible_successors_set_present_in_node_ids(possible_successors, nodes_ids):
    """Return the successors that also appear in *nodes_ids*, as a set.

    :param possible_successors: iterable of candidate successor ids
    :param nodes_ids: iterable of known node ids
    :return: set intersection of the two
    """
    return set(possible_successors) & set(nodes_ids)
def handler_mergeList(a, b):
    """Append to *a* (in place) every element of *b* not already present;
    return *a*.
    """
    # Linear membership test kept on purpose: elements may be unhashable,
    # so a seen-set is not an option.
    for element in b:
        if element in a:
            continue
        a.append(element)
    return a
def difference(d1, d2):
    """Difference of two dicts: the entries of *d1* whose keys do not
    appear in *d2*.
    """
    return {key: d1[key] for key in d1 if key not in d2}
def get_chain(value, key_chain, default=None):
    """Walk nested dicts along *key_chain*; return *default* when the
    chain breaks (any intermediate value is None).
    """
    current = value
    for key in key_chain:
        if current is None:
            return default
        current = current.get(key)
    return current
def format_tweet(tweet):
    """Basic output formatting of a tweet dict: "@user: text" plus an
    optional first media URL.
    """
    out = "@" + tweet['user']['screen_name'] + ": " + tweet['text'] + "\n"
    try:
        media = tweet['entities']['media']
        if len(media) > 0:
            out += "media: " + media[0]['media_url_https'] + "\n"
    except (KeyError, IndexError):
        # tweet carries no media
        pass
    return out
def read_dbxrefs(entry): """Parse db_links and return a list of dbxrefs""" # expected input (example): # kegg_information["DBLINKS"] = [ # 'DBLINKS PubChem: 4509', # 'ChEBI: 17950', # 'LIPIDMAPS: LMSP0501AB00', # 'LipidBank: GSG1147' # ] # # expected output (example): # dbxref_id = [ # 'PubChem:4509', # 'ChEBI:17950', # 'LIPIDMAPS:LMSP0501AB00', # 'LipidBank:GSG1147' # ] dbxref_id = [] for lines in entry: for line in lines: line = line.strip().split() if "DBLINKS" in line[0]: for word in line[2:]: dbxref_tuple = (line[1], word) dbxref_id.append("".join(dbxref_tuple)) else: for word in line[1:]: dbxref_tuple = (line[0], word) dbxref_id.append("".join(dbxref_tuple)) return dbxref_id
def df(*v):
    """Return a path to a file under this module's test_data directory."""
    from os.path import dirname, join
    base = dirname(__file__)
    return join(base, 'test_data', *v)
def usd(value):
    """Format *value* as USD, e.g. -3.5 -> "-$3.50"."""
    prefix = "-" if value < 0 else ""
    return f"{prefix}${abs(value):,.2f}"
def removeUnicodeIdentifiers(s):
    """Remove the u in front of a unicode literal: u'string' -> 'string'.

    Caveat (as in the original): any u immediately before a quote is
    stripped, so 'stru' followed by a quote also loses its u.
    @ In, s, string, string to remove characters from
    @ Out, s, string, cleaned string
    """
    return s.replace("u'", "'")
def lustre_string2index(index_string):
    """Parse a hex Lustre index string, e.g. "000e" -> (0, 14).

    Returns (0, index) on success; (-1, "") when the value exceeds 0xffff.
    """
    parsed = int(index_string, 16)
    if parsed > 0xffff:
        return -1, ""
    return 0, parsed
def dedup(dict, to_match):
    """Deduplicate a list of dicts by the *to_match* key (first one wins).

    (The first parameter is actually a list; its name -- which also
    shadows the dict builtin -- is kept for caller compatibility.)
    """
    seen = set()
    unique = []
    for record in dict:
        key = record[to_match]
        if key in seen:
            continue
        seen.add(key)
        unique.append(record)
    return unique
def preparse_address(addr_spec):
    """Preparse an email address into [local_part, domain].

    Handles odd ESP behavior: gmail ignores dots in the local part.
    Returns None when the address has no '@'.
    """
    parts = addr_spec.split('@')
    # sanity check: need both local-part and domain
    if len(parts) < 2:
        return None
    # if we add more esp specific checks, they should be done
    # with a dns lookup not string matching domain
    if parts[1] in ('gmail.com', 'googlemail.com'):
        parts[0] = parts[0].replace('.', '')
    return parts
def get_slack_current_subgraph_notification_message(subgraph_name: str, subgraph_version: str, subgraph_network: str, subgraph_last_block_number: str, infura_last_block_number: str):
    """Build the Slack block-kit payload for a NOT-OK subgraph status.

    :param subgraph_name: subgraph display name
    :param subgraph_version: 'current' or 'pending', shown in the Version
        field.  BUGFIX: this was previously ignored and the field
        hardcoded `current`.
    :param subgraph_network: network name; accepted but not currently
        rendered in the message (kept for API compatibility)
    :param subgraph_last_block_number: last block indexed by the subgraph
    :param infura_last_block_number: chain head block per Infura
    :return: dict ready to post to the Slack API
    """
    main_title = ":rotating_light: `Subgraph status is NOT OK`"
    fields = [
        {'type': 'mrkdwn', 'text': f'*Subgraph name:*\n {subgraph_name}'},
        {'type': 'mrkdwn', 'text': f'*Version (current|pending):*\n `{subgraph_version}`'},
        {'type': 'mrkdwn', 'text': f'*Subgraph last block:*\n {subgraph_last_block_number}'},
        {'type': 'mrkdwn', 'text': f'*Infura last block:*\n {infura_last_block_number}'},
    ]
    return {
        'text': 'Subgraph is NOT OK status',
        'blocks': [
            {'type': 'section', 'text': {'type': 'mrkdwn', 'text': main_title}},
            {'type': 'section', 'fields': fields},
        ],
    }
def prefix_average3(S):
    """Return A where A[j] is the average of S[0..j] (single pass, O(n))."""
    averages = [0] * len(S)
    running = 0
    for idx, val in enumerate(S):
        running += val  # running prefix sum up to and including S[idx]
        averages[idx] = running / (idx + 1)
    return averages
def _canonical_name(name): """ Replace aliases to the corresponding type. """ if name.startswith('int['): return 'uint256' + name[3:] if name == 'int': return 'uint256' if name.startswith('real['): return 'real128x128' + name[4:] if name == 'real': return 'real128x128' return name
def not_preceded_by(pattern):
    """Wrap *pattern* in a negative lookbehind: matches only when the
    current position is not preceded by the pattern.

    :param pattern: an `re` pattern
    :type pattern: str
    :rtype: str
    """
    return r'(?<!' + pattern + r')'
def makeaxisstep(start=0, step=1.00, length=1000, adjust=False, rounded=-1):
    """Create an axis (vector) from *start* with *length* bins of *step*.

    :param start: first value of the axis (default 0).
    :param step: step size for the axis (default 1.00).
    :param length: length of the axis (default 1000).
    :param adjust: if True, round values to the same number of decimal
        places as *step* has.
    :param rounded: number of decimals to round to; -1 disables rounding.
        Ignored when *adjust* is True.
    :returns: list[float] axis with the set parameters.

    BUGFIX: an integer *step* with adjust=True previously produced a
    decimal count of -1 and ``round(x, -1)`` silently rounded every value
    to the nearest ten; such steps are now treated as having 0 decimals.
    """
    if adjust:
        # count decimals of step via the reversed-string '.' position
        decimals = str(step)[::-1].find('.')
        if decimals < 0:
            decimals = 0  # step had no decimal point (e.g. int 1)
        return [round(start + step * i, decimals) for i in range(length)]
    if rounded >= 0:
        return [round(start + step * i, rounded) for i in range(length)]
    return [start + step * i for i in range(length)]
def _get_loc2(area_val, near_val): """ Handle location string, variant 2. :param area_val: value of the 'area' field. :param near_val: value of the 'near' field. :return: """ if area_val: s = " located in the %s area" % area_val if near_val: s += ", near %s." % near_val else: s += "." elif near_val: s = " located near %s." % near_val else: raise NotImplementedError() return s
def sign(x: float) -> float:
    """Return the sign of the argument as a float; zero returns zero."""
    # guard-clause order preserved so non-comparable values (NaN) still
    # fall through to 0.0 exactly as before
    if x > 0:
        return 1.0
    if x < 0:
        return -1.0
    return 0.0
def find_volume(r):
    """Return the volume of a sphere with radius *r*: (4/3) * pi * r^3."""
    from math import pi
    cubed = r ** 3
    return (4 / 3) * pi * cubed
def is_sequence_match(pattern: list, instruction_list: list, index: int) -> bool:
    """Check if the instructions starting at *index* follow a pattern.

    :param pattern: list of lists, e.g. [["PUSH1", "PUSH2"], ["EQ"]];
        each slot lists the opcodes allowed at that position
    :param instruction_list: list of instruction dicts (with "opcode")
    :param index: index to check from
    :return: True when every pattern slot matches
    """
    for offset, allowed_opcodes in enumerate(pattern):
        try:
            instruction = instruction_list[index + offset]
        except IndexError:
            # pattern runs past the end of the instructions
            return False
        if instruction["opcode"] not in allowed_opcodes:
            return False
    return True
def overlapping_intervals(intervals: list) -> list:
    """Merge overlapping intervals.  O(n*log(n)) from the sort.

    Sorts *intervals* in place by starting point, then sweeps once,
    fusing each interval into the last merged one when they touch.

    BUGFIX: the sweep previously ``break``-ed after the first merge,
    silently dropping every remaining interval; it now processes the
    whole list.
    """
    if len(intervals) < 2:
        return intervals
    intervals.sort(key=lambda interval: interval[0])  # sort by start
    merged = [intervals[0]]
    for current in intervals[1:]:
        last = merged[-1]
        if last[1] >= current[0]:
            # overlap: extend the last merged interval
            merged[-1] = (last[0], max(last[1], current[1]))
        else:
            merged.append(current)
    return merged
def sanitize_int(value):
    """Sanitize an input value to an integer.

    :param value: int, or a string holding an integer
    :return: the integer, or None when the value cannot be sanitized
    :rtype: int or None
    """
    if isinstance(value, int):
        return value
    if isinstance(value, str):
        try:
            return int(value)
        except ValueError:
            return None
    # any other type cannot be sanitized
    return None
def vector_columna(matriz, columna):
    """(list of list, int) -> list

    Return the column vector at position *columna* of *matriz*.

    >>> vector_columna([[1,2],[1,2]],0)
    [1, 1]

    :param matriz: matrix containing the vector
    :param columna: column position to extract
    :return: list with the selected column's elements
    """
    return [fila[columna] for fila in matriz]
def sort_player_scores(unsorted_scores, highest_possible_score):
    """Counting-sort *unsorted_scores* descending, given the maximum
    possible score.

    Raises TypeError when either argument has the wrong type.
    """
    # exact type checks kept (not isinstance): bool counts as wrong here
    if type(unsorted_scores) != list:
        raise TypeError(
            "The first argument for sort_player_scores must be of type list.")
    if type(highest_possible_score) != int:
        raise TypeError(
            "The second argument for sort_player_scores must be of type int.")
    if len(unsorted_scores) < 1:
        return []
    counts = {}
    for score in unsorted_scores:
        counts[score] = counts.get(score, 0) + 1
    ordered = []
    for candidate in range(highest_possible_score, -1, -1):
        if candidate in counts:
            ordered.extend([candidate] * counts[candidate])
    return ordered
def convert_v4_address_bits_to_string(ip_address_bits):
    """Render a 32-char bit string as a dotted-quad IPv4 address.

    e.g. '11111111111111110000000000000000' -> '255.255.0.0'
    """
    octets = []
    for start in range(0, len(ip_address_bits), 8):
        octets.append(str(int(ip_address_bits[start:start + 8], 2)))
    return '.'.join(octets)
def classesByDepartment(theDictionary, department):
    """List the courses being taken from a specific department.

    :param dict[str, int] theDictionary: class -> credits mapping; each
        class key starts with its department code
    :param str department: the department in question (case/whitespace
        insensitive)
    :return: the courses being taken within the department
    :rtype: list
    """
    wanted = department.upper().strip()
    return [course for course in theDictionary
            if course.split()[0] == wanted]
def path_to_obj(root_obj, attr_path):
    """Resolve an "attribute path" starting at *root_obj*.

    e.g. ``path_to_obj(A, ('B', 'bar'))`` returns ``A.B.bar``.
    Raises AttributeError when any step of the path is missing.
    """
    current = root_obj
    for attribute_name in attr_path:
        current = getattr(current, attribute_name)
    return current
def is_integer(s: str) -> bool:
    """Return True if the text is a (possibly signed) base-10 integer.

    BUGFIX: ``str.isdigit`` alone rejected signed values such as "-3",
    contradicting the documented contract; an optional leading '+'/'-'
    is now accepted.
    """
    unsigned = s[1:] if s[:1] in ('+', '-') else s
    return unsigned.isdigit()
def extract(input_data: str) -> list:
    """Parse newline-separated digit rows into a list of int lists."""
    rows = input_data.split('\n')
    return [[int(ch) for ch in row] for row in rows]
def zoom_to_roi(zoom, resolution):
    """Convert fractional (x, y, w, h) zoom parameters into pixel ROI
    corner points ((x1, y1), (x2, y2)).
    """
    x, y, w, h = zoom[0], zoom[1], zoom[2], zoom[3]
    width, height = resolution[0], resolution[1]
    top_left = (int(x * width), int(y * height))
    bottom_right = (int((x + w) * width), int((y + h) * height))
    return (top_left, bottom_right)
def hexdump(data):
    """Pretty print a hex dump of *data*, similar to xxd.

    Accepts a str (each character's ordinal is dumped) and -- fixed for
    Python 3 -- also bytes/bytearray (iterating bytes yields ints, which
    the original ``ord()`` call choked on).
    """
    lines = []
    offset = 0
    while offset < len(data):
        piece = data[offset:offset + 16]
        # normalise to integer byte values whatever the input type
        values = [item if isinstance(item, int) else ord(item)
                  for item in piece]
        hex_part = ''.join('%02x ' % v for v in values)
        # printable ASCII (0x21..0x7e) shown literally, the rest as '.'
        char_part = ''.join(chr(v) if 0x20 < v < 0x7f else '.'
                            for v in values)
        lines.append('%04x %-24s %-24s %-16s'
                     % (offset, hex_part[:24], hex_part[24:], char_part))
        offset += len(piece)
    return "\n".join(lines)
def generate_seqmining_dataset(patterns):
    """This function generates a sequence database to mine n-grams from.

    Words are accumulated until an entity marker (CHEMICAL_/DISEASE_/GENE_)
    is reached, at which point the accumulated phrase is flushed into the
    dataset; words after the final marker are discarded.

    Parameters
    ----------
    patterns : List of Textual Patterns

    Returns
    -------
    type
        List of Sequences
    """
    entity_prefixes = ("CHEMICAL_", "DISEASE_", "GENE_")
    sequences = []
    for pattern in patterns:
        buffered = []
        for token in pattern.split(" "):
            if token.startswith(entity_prefixes):
                if buffered:
                    sequences.append(' '.join(buffered))
                    buffered = []
            else:
                buffered.append(token)
    return sequences
def _underline(text: str, line_symbol: str): """ Add a line made of `line_symbol` after given `text`, and return new text. """ return text + "\n" + line_symbol * len(text)
def is_set(obj):
    """Return True only for instances of exactly the built-in ``set`` type.

    Subclasses of ``set`` (and ``frozenset``) deliberately return False.

    >>> is_set(set())
    True
    """
    return set is type(obj)
def get_filter_in(filters):
    """Recuperar filtros de una lista.

    Extract every list-valued entry from *filters* (mutating it in
    place) and return those entries as a new dict.
    """
    extracted = {}
    # Iterate a snapshot of the keys so entries can be removed safely.
    for key in list(filters):
        if isinstance(filters[key], list):
            extracted[key] = filters.pop(key)
    return extracted
def to_right(d):
    """Return direction vector *d* rotated 90 degrees to the right."""
    dx, dy = d
    return dy, -dx
def evaluate_cv_total_accuracy(val_beauty_acc, val_fashion_acc, val_mobile_acc, kaggle_public_acc=None):
    """Compute a weighted total accuracy across the three verticals.

    Weights are the per-vertical example counts. When a truthy
    *kaggle_public_acc* is supplied, 30% of the Kaggle test set is mixed
    in as a fourth component; omit it (or pass a falsy value) to
    evaluate the local CV score only.
    """
    n_beauty, n_fashion, n_mobile = 57317, 43941, 32065
    total_examples = n_beauty + n_fashion + n_mobile
    num_correct = n_beauty * val_beauty_acc + \
        n_fashion * val_fashion_acc + \
        n_mobile * val_mobile_acc
    if kaggle_public_acc:
        # 30% of the 172402-example Kaggle set contributes to the public LB.
        total_examples += 172402 * 0.3
        num_correct += 172402 * 0.3 * kaggle_public_acc
    return num_correct / total_examples
def identity(arg, **kwargs):
    """Return *arg* unchanged; any keyword arguments are accepted and ignored."""
    # pylint: disable=unused-argument
    return arg
def shift_row(ns):
    """
    ns -> States of nibbles

    Predict the states of nibbles after passing through ShiftRow in
    SomeCipher, expressed as a fixed permutation of the 12 positions.
    """
    assert len(ns) == 12
    # Output position i takes its value from input position order[i].
    order = (0, 1, 10, 7, 4, 5, 2, 11, 8, 9, 6, 3)
    return [ns[i] for i in order]
def substr_enclosed_in_seq(data, initial_seq, terminal_seq, from_index=0, to_index=-1, include_trailing_newline=False):
    """
    Get first substring between initial and terminal key sequence if present in data,
    e.g. if data is '[[my_val]]', initial_seq is '[[', and terminal_seq is ']]',
    the result is {'value': 'my_val', 'start': 0, 'end': 8, 'error': ''};
    :param data: string
    :param initial_seq: string
    :param terminal_seq: string
    :param from_index: index from which to start search
    :param to_index: index where to stop search (not including it), -1 means the data end
    :param include_trailing_newline: if true, result['end'] will be advanced
        to include trailing new line chars;
    :return: Dict:
        'value' (everything between initial and terminal sequence,
        'start',
        'end' (open range, index of next character after teminal_seq end)
        'error': empty string if no errors, 'syntax' otherwise
    """
    # Default result: start/end of -1 mean "not found"; error stays '' unless
    # the initial sequence is found but the terminal one is missing.
    result = {'value': '', 'start': -1, 'end': -1, 'error': ''}
    # Locate the opening sequence, bounded by to_index when it is given.
    if to_index == -1:
        start = data.find(initial_seq, from_index)
    else:
        start = data.find(initial_seq, from_index, to_index)
    if start == -1:
        # No opening sequence at all: not an error, just an empty result.
        return result
    result['start'] = start
    # Locate the closing sequence after the opening one.
    # NOTE(review): the search starts at `start`, not `start + len(initial_seq)`,
    # so an overlapping terminal_seq inside initial_seq could match — presumably
    # the two sequences never overlap in practice; verify against callers.
    if to_index == -1:
        end = data.find(terminal_seq, start)
    else:
        end = data.find(terminal_seq, start, to_index)
    if end == -1:
        # Opening found but no closing: malformed input.
        result['error'] = 'syntax'
        return result
    # Open-ended index of the first character after the terminal sequence.
    result_end = end + len(terminal_seq)
    if include_trailing_newline:
        # Search for trailing '\n' and/or '\r' symbols and
        # include them into the range
        data_len = len(data)
        if data_len - result_end >= 2:
            # At least two chars remain: consume one newline char, then a
            # second one only if it differs from the first (so "\r\n" or
            # "\n\r" is consumed as a pair, but "\n\n" consumes just one).
            if data[result_end] == '\r' or data[result_end] == '\n':
                result_end += 1
                if data[result_end] == '\r' or data[result_end] == '\n':
                    if data[result_end] != data[result_end - 1]:
                        result_end += 1
        elif data_len - result_end == 1:
            # Exactly one char remains: consume it if it is a newline char.
            if data[result_end] == '\r' or data[result_end] == '\n':
                result_end += 1
    result['end'] = result_end
    # The enclosed value excludes both the initial and terminal sequences.
    result['value'] = data[start + len(initial_seq):end]
    return result
def get_airflow_version(ac_version):
    """Get the Airflow version prefix from the string containing the AC version.

    Everything before the first '-' is treated as the Airflow version;
    a string without '-' is returned unchanged.
    """
    version, _, _ = ac_version.partition('-')
    return version
def del_null(num):
    """Replace None with zero; every other value passes through unchanged."""
    if num is None:
        return 0
    return num
def abbr(label):
    """Gets a better visualization with abbreviations of long attribute names."""
    # publication_date and date_posted do not appear in the same vertical,
    # so both may safely collapse to "date".
    replacements = {
        "fuel_economy": "fuel",
        "publication_date": "date",
        "date_posted": "date",
        "mpaa_rating": "mpaa",
        "isbn_13": "isbn13",
    }
    return replacements.get(label, label)
def ContainsString(field, value):
    """
    A criterion used to search for objects whose text field contains `value`.

    It is a wildcard operator that wraps the searched value with asterisks
    (when they are not already present), so it operates the same way as the
    `Like` criterion.

    For example:

    * search for cases where title is like `*malspam*`
    * search for observable where description contains the text `*malware*`

    Arguments:
        field (value): field name
        value (Any): searched value

    Returns:
        dict: JSON representation of the criterion

    ```python
    # Search for tasks where title contains 'Communication'
    query = ContainsString('title', 'Communication')
    ```

    produces

    ```json
    {
        "_wildcard": {
            "_field": "title",
            "_value": "*Communication*"
        }
    }
    ```
    """
    wrapped = value if value.endswith('*') else value + '*'
    if not wrapped.startswith('*'):
        wrapped = '*' + wrapped
    return {'_wildcard': {'_field': field, '_value': wrapped}}
def listToIndiceString(my_list):
    """Serialize rows of numbers into a ';'-separated string.

    for example: turns [[1,2,63,7], [5,2,986], [305,3], []]
    into "1,2,63,7;5,2,986;305,3;" — values within a row are joined
    with ',' after truncation to int.
    """
    rows = []
    for row in my_list:
        rows.append(",".join(str(int(value)) for value in row))
    return ";".join(rows)
def hamming(set1, set2):
    """Hamming distance between sets `a` and `b`.

    The Hamming distance for sets is the size of their symmetric
    difference, or, equivalently, the usual Hamming distance when sets
    are viewed as 0-1-strings.

    Uses hash-set lookups for membership so the distance is computed in
    O(n + m) instead of the original O(n * m) list scans; duplicate
    elements in non-set iterables are still counted exactly as before.

    Parameters
    ----------
    set1, set2 : iterable of int
        The two sets, for which the Hamming distance is computed.
    """
    # Materialize once so generator inputs are not exhausted mid-way.
    items1 = list(set1)
    items2 = list(set2)
    lookup1 = set(items1)
    lookup2 = set(items2)
    return sum(1 for x in items1 if x not in lookup2) + \
        sum(1 for x in items2 if x not in lookup1)
def get_fill(st, starttime=None, endtime=None):
    """
    Subroutine to get data fill

    Computes the fraction of the requested time window actually covered
    by data in `st`, as 1.0 minus the fractions lost to leading/trailing
    shortfalls and to internal gaps.

    NOTE(review): `st` appears to be an obspy Stream (uses `tr.stats.*`
    and `st.get_gaps()`); times are presumably UTCDateTime objects whose
    differences are float seconds — confirm against callers. If the
    effective window length is zero, the divisions below raise
    ZeroDivisionError.

    @rtype: float
    """
    if len(st) == 0:
        # no trace
        return 0.0
    # Earliest start and latest end over all traces in the stream.
    ststart = min(tr.stats.starttime for tr in st)
    stend = max(tr.stats.endtime for tr in st)
    # Total window length: the explicit bounds win over the data bounds.
    dttot = (stend if not endtime else endtime) - \
        (ststart if not starttime else starttime)
    gaps = st.get_gaps()
    fill = 1.0
    # Subtract the fraction missing before the data starts / after it ends.
    if starttime:
        fill -= max(ststart - starttime, 0.0) / dttot
    if endtime:
        fill -= max(endtime - stend, 0.0) / dttot
    for g in gaps:
        # get_gaps() rows carry the gap start at index 4 and end at index 5.
        gapstart = g[4]
        gapend = g[5]
        # Clamp the gap to the requested window so out-of-window portions
        # do not reduce the fill (a fully out-of-window gap clamps to
        # zero length).
        if starttime:
            gapstart = max(gapstart, starttime)
            gapend = max(gapend, starttime)
        if endtime:
            gapstart = min(gapstart, endtime)
            gapend = min(gapend, endtime)
        fill -= (gapend - gapstart) / dttot
    return fill
def by_locale(value_for_us, value_for_international):
    """
    Return a dictionary mapping "us" and "international" to the two
    values.

    This is used to create locale-specific values within our UNITS.
    """
    return {
        "us": value_for_us,
        "international": value_for_international,
    }
def flatten_list(a, result=None):
    """Flattens a nested list.

    Appends every non-list element (depth-first) into *result*, which is
    created fresh when not supplied, and returns it.

    >>> flatten_list([[1, 2, [3, 4]], [5, 6], 7])
    [1, 2, 3, 4, 5, 6, 7]
    """
    out = [] if result is None else result
    for element in a:
        if isinstance(element, list):
            flatten_list(element, out)
        else:
            out.append(element)
    return out
def validateFilename(value):
    """Validate filename: reject empty names, return the value otherwise.

    Raises ValueError when *value* has zero length.
    """
    if not len(value):
        raise ValueError("Name of SimpleGridDB file must be specified.")
    return value
def data_has_value(data, search_value):
    """Recursively search for a given value.

    Args:
        data (dict, list, primitive)
        search_value (string)

    Returns:
        (bool) True or False if found
    """
    if isinstance(data, dict):
        return any(data_has_value(value, search_value) for value in data.values())
    if isinstance(data, list):
        return any(data_has_value(element, search_value) for element in data)
    return data == search_value
def getParameter(parameter, key, default=None):
    """Gets a parameter from a dict, returns default value if not defined

    Arguments:
        parameter (dict): parameter dictionary
        key (object): key
        default (object): default return value when *key* is absent or
            *parameter* is not a dict

    Returns:
        object: parameter value for key
    """
    if isinstance(parameter, dict):
        return parameter.get(key, default)
    return default
def decode_csv(csv_string, column_names):
    """Parse a csv line into a dict.

    Args:
        csv_string: a csv string. May contain missing values "a,,c"
        column_names: list of column names

    Returns:
        Dict of {column_name, value_from_csv}. If there are missing
        values, value_from_csv will be ''.

    Raises:
        ValueError: when the line's column count differs from
            len(column_names).
    """
    import csv
    values = next(csv.reader([csv_string]))
    if len(values) != len(column_names):
        raise ValueError('csv line %s does not have %d columns' %
                         (csv_string, len(column_names)))
    return dict(zip(column_names, values))