content
stringlengths
42
6.51k
def asfloat(x):
    """Best-effort conversion of *x* to float.

    Returns ``float(x)`` when the conversion succeeds, otherwise the
    original value unchanged (graceful failure).
    """
    try:
        converted = float(x)
    except (ValueError, TypeError):
        return x
    return converted
def invoke_if_valid(cb, value):
    """Apply *cb* to *value* when *cb* is callable; otherwise pass *value* through."""
    if callable(cb):
        return cb(value)
    return value
def basename(p):
    # type: (str) -> str
    """Return everything after the last '/' in *p* (the final path component)."""
    return p[p.rfind('/') + 1:]
def Scheme(endpoint):
    """Pick the URL scheme: plain HTTP for localhost endpoints, HTTPS otherwise."""
    return 'http' if endpoint.startswith('localhost:') else 'https'
def makename(package, module):
    """Join *package* and *module* with a dot; either part may be None/empty."""
    if not package:
        return module
    if module:
        return package + '.' + module
    return package
def count_phrase_frequency(raw_list):
    """Count how often each phrase occurs in a text.

    *raw_list* is a list of phrases, each phrase itself a list of words
    (a list of lists). Every phrase is flattened into a space-joined
    string, and the returned dict maps that string to its occurrence
    count.
    """
    frequencies = dict()
    for phrase in raw_list:
        flattened = " ".join(word for word in phrase)
        frequencies[flattened] = frequencies.get(flattened, 0) + 1
    return frequencies
def scalar_mul(c, X):
    """Multiply a vector by a scalar.

    Generalized from the original hard-coded 2-component version to
    vectors of any length; 2-vectors behave exactly as before.

    Args:
        c: scalar multiplier.
        X: sequence of numbers.

    Returns:
        list: element-wise product ``[c * x for x in X]``.
    """
    return [c * x for x in X]
def determine_adjusted(adjusted):
    """Determine whether the split-adjusted closing price should be used.

    Fixes the non-idiomatic ``== True`` / ``== False`` comparisons and the
    silent ``None`` return for unexpected values: any truthy value now
    selects the adjusted close. Boolean callers see identical results.
    (Also fixes the "weather" typo in the original docstring.)

    :param adjusted: truthy to use the split-adjusted close.
    :return: ``'adjClose'`` if adjusted, otherwise ``'close'``.
    """
    return 'adjClose' if adjusted else 'close'
def seq_decode(_string, symbols):
    """Decode an integer from a string written in a positional symbol alphabet.

    Treats *_string* as a base-``len(symbols)`` number whose digits are
    the characters of *symbols*, most significant digit first.

    Bug fix: the positional weight was ``i ** base`` instead of
    ``base ** i`` — e.g. the least significant digit (i == 0) was always
    weighted 0, so every single-character string decoded to 0.

    :param str _string: string to decode
    :param symbols: sequence key
    :type symbols: str or list[char]
    :returns: decoded value
    :rtype: int
    """
    value = 0
    base = len(symbols)
    for i, c in enumerate(reversed(_string)):
        value += symbols.index(c) * base ** i
    return value
def post_processing_aggregate_results(l_d_classification_results):
    """Merge per-worker cones/prefixes classification dicts into one dict.

    Bug fix: ``dict.iteritems()`` is Python 2 only and raises
    AttributeError on Python 3. ``dict.update`` merges each partial
    result directly; later results overwrite earlier keys, exactly as
    the original key-by-key assignment did.

    :param l_d_classification_results: iterable of dicts produced by the
        multi-processing workers.
    :return: the merged dict.
    """
    d_ases_cones_prefixes_merged = dict()
    for dict_result in l_d_classification_results:
        d_ases_cones_prefixes_merged.update(dict_result)
    return d_ases_cones_prefixes_merged
def getval(obj, getter):
    """Extract a value from *obj*.

    *getter* is either a callable (applied directly to *obj*) or a
    dot-separated attribute path. Each attribute along the path that
    resolves to a callable is invoked; traversal stops early when an
    intermediate value is None. True maps to 'yes', False to '', and
    the final value is returned as a string ('' for any falsy value).
    """
    if callable(getter):
        result = getter(obj)
    else:
        result = obj
        for name in getter.split('.'):
            result = getattr(result, name)
            if callable(result):
                result = result()
            if result is None:
                break
    if result is True:
        result = 'yes'
    elif result is False:
        result = ''
    return str(result or '')
def array_remove_duplicates(s):
    """Remove duplicated elements from the list *s*, keeping first occurrences.

    Survivors are compacted in place at the front of *s* (matching the
    original behavior) and returned as a slice.

    Idiom fix: membership was tested with ``x not in found.keys()`` on a
    dict used as a set — a real set gives the same O(1) test without the
    redundant ``.keys()`` call.
    """
    seen = set()
    j = 0
    for x in s:
        if x not in seen:
            seen.add(x)
            s[j] = x
            j += 1
    return s[:j]
def average_word_error_rate(word_error_rate_scores, combined_ref_len):
    """Average word error rate over a whole sentence.

    :param word_error_rate_scores: (float) per-word error rates.
    :param combined_ref_len: (int) length of the reference sentence.
    :return: (float) total score divided by the reference length.
    """
    total = sum(word_error_rate_scores)
    return float(total) / float(combined_ref_len)
def _get_rid_for_name(entries, name):
    """Return the 'rid' of the entry in *entries* whose 'name' equals *name*.

    :raises ValueError: when no entry with that name exists.
    """
    for record in entries.values():
        if record['name'] == name:
            return record['rid']
    raise ValueError(u'No entry with name {} found in entries {}'.format(name, entries))
def stripped(userhost):
    """Return the stripped userhost — everything before the first '/'."""
    head, _, _ = userhost.partition('/')
    return head
def func_args_pq(*args, p="p", q="q"):
    """Echo back the positional args and keyword-only p/q values.

    Parameters
    ----------
    args: tuple
    p, q: str, optional

    Returns
    -------
    tuple
        (None, None, None, None, args, p, q, None) — four leading and one
        trailing None placeholder around the captured arguments.
    """
    placeholders = (None, None, None, None)
    return placeholders + (args, p, q, None)
def _format_td(timedelt):
    """Format a timedelta as hh:mm:ss (rounded to whole seconds); '' for None."""
    if timedelt is None:
        return ''
    total = int(round(timedelt.total_seconds()))
    hh, remainder = divmod(total, 3600)
    mm, ss = divmod(remainder, 60)
    return '{:02d}:{:02d}:{:02d}'.format(hh, mm, ss)
def pa_array_to_dict(array):
    """Convert a D-Bus named array into a plain ``{str: str}`` dict.

    D-Bus arrays come out awkwardly: they use D-Bus wrappers for
    strings/ints, and string values arrive as zero-terminated byte
    arrays. Each value is therefore rebuilt as bytes, stripped of
    trailing NULs and decoded, and each key is stringified.

    FIXME: assumes all keys and values are meant to be strings.
    """
    result = {}
    for key, raw in array.items():
        value = bytes(byte for byte in raw).strip(b'\x00').decode()
        result[str(key)] = value
    return result
def query_to_dict(results, descriptions):
    """Convert raw SQL query rows into a list of dicts.

    Stands in for the old ``cursor.dictfetchall()`` (gone from psycopg2,
    presumably other backends too): each row of *results* becomes a dict
    keyed by the column names from *descriptions*.

    :param results: sequence of row tuples.
    :param descriptions: cursor description entries; entry[0] is the column name.
    :return: list of {column_name: value} dicts, suitable for templates etc.
    """
    return [
        {column[0]: row[i] for i, column in enumerate(descriptions)}
        for row in results
    ]
def tap(x, label=None):
    """Debugging passthrough: print *x* (prefixed with *label* when given) and return it."""
    output = '%s: %s' % (label, x) if label else x
    print(output)
    return x
def group_course_contents(courses_contents):
    """Group all course contents by their content type.

    :param courses_contents: list of content dicts (one per course content),
        each carrying a 'type' key.
    :type courses_contents: list(dict(str, int))
    :return: dict mapping each content type to the list of matching dicts.
    :rtype: dict(str, list(dict(str, int)))
    """
    grouped = {}
    for content in courses_contents:
        grouped.setdefault(content['type'], []).append(content)
    return grouped
def calc_amount_function(obj_for_calc_amount):
    """
    Calculates actual molecule amounts.

    Three types of amounts are calculated for a matched isotope
    chromatogram (MIC): maximum intensity, summed up intensity and area
    under curve. Additionally the score and the retention time at the
    maximum intensity are determined. A test function exists to check
    correct amount determination.

    Expects ``obj_for_calc_amount`` to be a mapping with parallel lists
    under keys "i" (intensities), "rt" (retention times) and "scores"
    — presumably equal length; TODO confirm against callers.

    Returned keys in amount dict:
        * 'max I in window'
        * 'max I in window (rt)'
        * 'max I in window (score)'
        * 'auc in window'
        * 'sum I in window'

    Returns:
        dict: amount dict with keys shown above, or None when the
        intensity list is empty.
    """
    amount_dict = None
    if len(obj_for_calc_amount["i"]) != 0:
        amount_dict = {}
        # Peak intensity and its position; .index returns the FIRST maximum.
        maxI = max(obj_for_calc_amount["i"])
        index_of_maxI = obj_for_calc_amount["i"].index(maxI)
        amount_rt = obj_for_calc_amount["rt"][index_of_maxI]
        amount_score = obj_for_calc_amount["scores"][index_of_maxI]
        amount_dict["max I in window"] = maxI
        amount_dict["max I in window (rt)"] = amount_rt
        amount_dict["max I in window (score)"] = amount_score
        amount_dict["sum I in window"] = sum(obj_for_calc_amount["i"])
        amount_dict["auc in window"] = 0
        x0, y0, x1, y1 = 0, 0, 0, 0
        # Accumulate the area under the intensity profile segment by
        # segment: each rectangle ("square") plus/minus the triangle on
        # top amounts to the trapezoid between adjacent points.
        for pos_in_profile, intensity in enumerate(obj_for_calc_amount["i"]):
            if pos_in_profile == 0:
                continue  # for auc calculation we need to be at position 2...
            x0 = obj_for_calc_amount["rt"][pos_in_profile - 1]  # i.e. the last rt
            y0 = obj_for_calc_amount["i"][pos_in_profile - 1]  # i.e. the last intensity
            x1 = obj_for_calc_amount["rt"][pos_in_profile]
            y1 = intensity
            xspace = x1 - x0
            height_of_triangle = abs(y1 - y0)
            square = xspace * y0
            triangle = 0.5 * (xspace * height_of_triangle)
            amount_dict["auc in window"] += square
            # Rising segment adds the triangle, falling segment removes it,
            # flat segment contributes the rectangle only.
            if y0 < y1:
                amount_dict["auc in window"] += triangle
            elif y0 > y1:
                amount_dict["auc in window"] -= triangle
            else:
                amount_dict["auc in window"] += 0
    return amount_dict
def parse_item(item):
    """Extract the CVE id and vendor data from an NVD database item.

    :param item: dict shaped like an entry of the NVD JSON feed.
    :return: dict with keys 'id' (the CVE id, '' when absent) and
        'vendors' (the vendor_data list, [] when absent).
    """
    cve = item.get('cve', {})
    return {
        'id': cve.get('CVE_data_meta', {}).get('ID', ''),
        'vendors': cve.get('affects', {}).get('vendor', {}).get('vendor_data', []),
    }
def guess_format(text):
    """Guess whether *text* is YANG or YIN.

    YIN is XML, so a first non-whitespace character of '<' means 'yin';
    anything else (including all-whitespace input) means 'yang'.
    """
    return 'yin' if text.lstrip().startswith('<') else 'yang'
def linear_map(src_value, src_domain, dst_range):
    """Linearly map *src_value* from *src_domain* onto *dst_range*.

    The value is expected to already be in the same base/format as the
    source domain (the caller currently converts from bytes; ideally that
    conversion would stay external).

    :param src_value: value expected to lie inside src_domain.
    :param src_domain: (min, max) pair of the source domain.
    :param dst_range: (min, max) pair of the destination range.
    :raises ValueError: when the input, or the mapped output, falls
        outside its respective interval.
    """
    src_min, src_max = src_domain
    dst_min, dst_max = dst_range
    if not src_min <= src_value <= src_max:
        raise ValueError
    scale = (dst_max - dst_min) / (src_max - src_min)
    dst_value = scale * (src_value - src_min) + dst_min
    if not dst_min <= dst_value <= dst_max:
        raise ValueError
    return dst_value
def myround(number, multiple=5):
    """Round *number* to the nearest *multiple* (default 5)."""
    quotient = round(number / multiple)
    return multiple * quotient
def calculate_ideal_amount_of_iterations(n, k=1):
    """Ideal number of Grover iterations for *n* qubits and *k* solutions.

    See:
    https://en.wikipedia.org/wiki/Grover%27s_algorithm#Extension_to_space_with_multiple_targets
    With no solutions no iterations are needed (they would merely
    scramble the states), so k <= 0 yields 0.

    Args
    --------------------
    n: amount of qubits to be diffused
    k: amount of solutions
    """
    import math
    if k <= 0:
        return 0
    # Closest integer to the theoretical optimum (pi/4) * sqrt(N/k).
    return round((math.pi / 4) * math.sqrt(2 ** n / k))
def count_spaces(line):
    """Number of space characters at the start of *line*."""
    without_leading = line.lstrip(' ')
    return len(line) - len(without_leading)
def get_shape_type(name):
    """Translate a shape type name (Polygon, Polyline, Point) to a, l or p.

    Prints a warning and returns -1 for unknown names.
    """
    codes = {"Polygon": "a", "Polyline": "l", "Point": "p"}
    if name in codes:
        return codes[name]
    print("Unknown shape type")
    return -1
def parser_preferred_name_identifier_Descriptor(data, i, length, end):
    """Parse (pass through) a preferred_name_identifier descriptor.

    The descriptor is not actually interpreted; the raw contents between
    the 2-byte header and *end* are returned verbatim as::

        { "type": "preferred_name_identifier",
          "contents": unparsed_descriptor_contents }

    (Defined in ETSI EN 300 468 specification)
    """
    contents = data[i + 2:end]
    return {"type": "preferred_name_identifier", "contents": contents}
def answer(panel_array):
    """Return (as a string) the maximum product of positive and an even
    number of negative panels.

    All positive panels are multiplied together with an even count of
    negative panels; when the negative count is odd, the negative with
    the smallest magnitude is dropped.

    Bug fix: two edge-case paths returned raw ints (``negative_panels[0]``
    and ``0``) while every other path returned a string; all paths now
    consistently return strings.
    """
    print("panel_array=", panel_array)
    # Edge case I: no panels :]
    if len(panel_array) == 0:
        return str(0)
    # Get zero panels.
    zero_panels = [x for x in panel_array if x == 0]
    print("zero_panels=", zero_panels)
    # Edge case II: no positive nor negative panels.
    if len(zero_panels) == len(panel_array):
        return str(0)
    # Get positive panels.
    positive_panels = [x for x in panel_array if x > 0]
    print("positive_panels=", positive_panels)
    positive_product = 1
    for x in positive_panels:
        positive_product *= x
    # Get negative panels (sorted ascending, so pop() drops the one
    # closest to zero).
    negative_panels = sorted(x for x in panel_array if x < 0)
    print("negative_panels=", negative_panels)
    # Edge case III: there is only one "negative panel".
    if len(negative_panels) == 1:
        if len(panel_array) == 1:
            # The lone negative panel is all we have.
            return str(negative_panels[0])
        elif len(positive_panels) == 0 and len(zero_panels) > 1:
            # No positives; a zero beats the single negative.
            return str(0)
    # An odd number of negatives: drop the smallest-magnitude one.
    if len(negative_panels) % 2 != 0:
        negative_panels.pop()
    print("final negative_panels=", negative_panels)
    negative_product = 1
    for x in negative_panels:
        negative_product *= x
    return str(negative_product * positive_product)
def fix_filter_query(filter):
    """Normalize a user-supplied filter string into a dict.

    The expected shape is ``"key=value&key2=value2"``; duplicate clauses
    are removed via a set before splitting each pair.

    Args:
        filter: filter string from the user (may be falsy).

    Returns:
        dict on success, {} when parsing fails, or the original falsy
        value unchanged when nothing was supplied.
    """
    if not filter:
        return filter
    try:
        result = {}
        for clause in set(str(filter).split("&")):
            parts = clause.split("=")
            result[parts[0]] = parts[1]
        return result
    except Exception:
        return {}
def format_parsing_error(result):
    """Format any Ansible playbook parsing error found in *result*.

    :param result: result dict to inspect.
    :return: formatted message, or '' when no 'reason' key is present.
    """
    if 'reason' not in result:
        return ''
    return 'Parsing the playbook failed:\n{}'.format(result['reason'])
def add_padding(data: bytes, block_n_bytes: int) -> bytes:
    """Apply 0x80-then-zeros padding (per NIST 800-38A) up to a block boundary."""
    padded = data + b'\x80'
    shortfall = -len(padded) % block_n_bytes
    return padded + b'\0' * shortfall
def longest_increasing_subsequence(seq):
    """Return one longest strictly-increasing subsequence of *seq* as a list.

    Memoized recursion: helper(x) is the longest increasing subsequence
    of seq that ends exactly at index x.

    Fixes:
      * an empty *seq* used to raise ValueError from ``max([])``;
        it now returns [].
      * ``helper(y)`` was evaluated twice per candidate; the memoized
        result is now fetched once.
    """
    if not seq:
        return []
    cache = {}  # memoized best subsequence ending at each index

    def helper(x):
        # Best strictly-increasing subsequence ending at index x.
        if x == 0:
            return seq[:1]
        if x in cache:
            return cache[x]
        best_so_far = []
        for y in range(x):
            if seq[y] >= seq[x]:  # strictly increasing only
                continue
            candidate = helper(y)
            if len(candidate) > len(best_so_far):
                best_so_far = candidate
        # New list on purpose — += would mutate a cached list.
        best_so_far = best_so_far + [seq[x]]
        cache[x] = best_so_far
        return best_so_far

    return max([helper(i) for i in range(len(seq))], key=len)
def _remove_empty_items(input_json):
    """Drop falsy values (None, '', 0, ...) from *input_json*.

    Returns the filtered dict, or None when nothing survives.
    """
    filtered = {key: value for key, value in input_json.items() if value}
    return filtered or None
def calc_tdf(docs):
    """Document-frequency scores over the corpus *docs*.

    :param docs: iterable of documents, each an iterable of terms.
    :return: dict mapping every term to the number of documents
        containing it.
    """
    vocabulary = set()
    for doc in docs:
        vocabulary.update(doc)
    return {term: sum(1 for doc in docs if term in doc) for term in vocabulary}
def get_prov_transp(attr, ancestor_file, plotname):
    """Create a provenance record for the 1d meridional transports.

    :param attr: pair (transport kind, model name) interpolated into the caption.
    :param ancestor_file: ancestor file(s) for the record.
    :param plotname: path of the produced plot.
    :return: the provenance record dict.
    """
    caption = (
        "Thermodynamic Diagnostic Tool - Annual mean zonally averaged"
        " meridional {} transports"
        " for model {}.".format(attr[0], attr[1])
    )
    return {
        'caption': caption,
        'statistics': ['mean'],
        'domains': ['global'],
        'plot_type': ['sect'],
        'plot_file': plotname,
        'authors': ['lembo_valerio'],
        'references': ['lembo16climdyn', 'lembo19gmdd', 'lucarini14revgeop'],
        'ancestors': ancestor_file,
    }
def parse(puzzle_input):
    """Parse the puzzle input: one int per whitespace-separated token."""
    return list(map(int, puzzle_input.split()))
def alias_tags(tags_list, alias_map):
    """Rewrite each tag through *alias_map*.

    Args:
        tags_list (list): list of tag lists.
        alias_map (dict): maps old tag -> new tag; a None alias drops the
            tag, and tags without an entry are kept unchanged.

    Returns:
        list: per-input-list deduplicated lists of updated tags.
    """
    def remap(tags):
        renamed = (alias_map.get(tag, tag) for tag in tags)
        return list({tag for tag in renamed if tag is not None})

    return [remap(tags) for tags in tags_list]
def __fix_ada_dictionary__(bayesDict):
    """Convert AdaBoost's 'param_base_estimator' entries to strings.

    Bug fix: the original popped and re-inserted the key while iterating
    ``bayesDict.items()``; re-insertion moves the key to the end of the
    dict, so the iterator can visit it again, and mutating a dict during
    iteration is unsafe in general. The key is now updated in place,
    without iterating the dict at all. (A large block of dead,
    commented-out param-grid cleanup code was removed.)

    :param bayesDict: dict of AdaBoost parameters.
    :return: the same dict, with 'param_base_estimator' values stringified.
    """
    if 'param_base_estimator' in bayesDict:
        bayesDict['param_base_estimator'] = [
            str(estimator) for estimator in bayesDict['param_base_estimator']
        ]
    return bayesDict
def reflectance(n1, n2):
    """Amplitude reflection coefficient when moving from index n1 to index n2.

    Args:
        n1 (float): Refractive index of media 1.
        n2 (float): Refractive index of media 2.

    Returns:
        float: (n1 - n2) / (n1 + n2).
    """
    numerator = n1 - n2
    denominator = n1 + n2
    return numerator / denominator
def _dict_conll_map_helper(values, empty, delim, av_separator, v_delimiter,
                           formatter, av_key):
    """Map a dict to its CoNLL-U column representation.

    Attribute-value pairs are sorted by *av_key*; each value is rendered
    with *formatter* (which receives the value and *v_delimiter*, and may
    return None to mean "no value part" — then only the attribute name is
    emitted). Pairs are joined internally with *av_separator* and
    concatenated with *delim*. The *empty* placeholder is returned for an
    empty dict or empty output.

    Args:
        values: the dict to map.
        empty: the empty CoNLL-U representation for this value.
        delim: delimiter between attribute-value pairs.
        av_separator: separator between attribute and value.
        v_delimiter: delimiter between values of the same attribute.
        formatter: value renderer as described above.
        av_key: sorting key for the attribute-value pairs.

    Returns:
        The CoNLL-U formatted equivalent of *values*.
    """
    if values == {}:
        return empty

    def render(pair):
        formatted_value = formatter(pair[1], v_delimiter)
        if formatted_value is None:
            return (pair[0],)
        return (pair[0], formatted_value)

    pieces = [av_separator.join(render(pair))
              for pair in sorted(values.items(), key=av_key)]
    output = delim.join(pieces)
    return output if output else empty
def get_base_by_color(base):
    """Map a color-band intensity back to its base letter.

    Bands (high to low): >=250 'A', >=180 'G', >=100 'C', >=30 'T',
    >=5 '*'; anything lower falls through and yields None implicitly,
    matching the original.
    """
    bands = ((250, 'A'), (180, 'G'), (100, 'C'), (30, 'T'), (5, '*'))
    for lower_bound, letter in bands:
        if base >= lower_bound:
            return letter
def parse_packageset(packageset):
    """Extract {package name: lowercased repository} from a PES event package set."""
    result = {}
    for package in packageset.get('package', []):
        result[package['name']] = package['repository'].lower()
    return result
def Qualify(ns, name):
    """Return *name* qualified with namespace *ns* in Clark notation."""
    return '{{{}}}{}'.format(ns, name)
def ConstantAddition(s, t, scheme):
    """ConstantAddition step from the LowMC spec: XOR round constants into the state.

    :param s: state bits.
    :param t: round number (1-based; constants are scheme['b'][t-1]).
    :param scheme: LowMC parametrization + constants, providing 'b' and
        'blocksize'.
    """
    round_constants = scheme['b'][t - 1]
    return [s[bit] ^ round_constants[bit] for bit in range(scheme['blocksize'])]
def IoU(bbox_1, bbox_2):
    """Intersection-over-Union of two boxes given as (x1, y1, x2, y2).

    Coordinates are treated as inclusive, hence the +1 in every
    width/height computation.

    :param bbox_1:
    :param bbox_2:
    :return: IoU value.
    """
    def area(box):
        return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)

    inter_x1 = max(bbox_1[0], bbox_2[0])
    inter_y1 = max(bbox_1[1], bbox_2[1])
    inter_x2 = min(bbox_1[2], bbox_2[2])
    inter_y2 = min(bbox_1[3], bbox_2[3])
    overlap_w = max(0, inter_x2 - inter_x1 + 1)
    overlap_h = max(0, inter_y2 - inter_y1 + 1)
    overlap_area = overlap_w * overlap_h
    union_area = area(bbox_1) + area(bbox_2) - overlap_area
    return overlap_area * 1.0 / union_area
def boolstr(value, true="true", false="false"):
    """Convert a boolean value into a string.

    Intended for file templates: take a boolean stored in Pillars or
    Grains and write it out in the appropriate syntax for the template.

    :param value: the boolean value to be converted.
    :param true: returned when *value* is truthy.
    :param false: returned when *value* is falsy.

    Jinja example (written on two lines for clarity)::

        {% set encrypted = salt[pillar.get]('smtp:encrypted', false) %}
        use_tls: {{ salt['slsutil.boolstr'](encrypted, 'yes', 'no') }}

    Result (assuming the value is ``True``)::

        use_tls: yes
    """
    return true if value else false
def check_cache(cache_obj: dict) -> tuple:
    """Check if redis is used to cache images, and if the named image is cached.

    Mutates *cache_obj*: 'use_redis' is set to False when the lookup
    fails; 'in_cache' is set whenever redis is usable.

    Bug fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; it now catches only Exception (still deliberately
    broad — any redis/lookup failure simply disables the cache).

    :param cache_obj: dict with 'redis', 'name' and 'use_redis' entries.
    :return: (cache_obj, image) where image is the utf-8 decoded cached
        payload (a Bokeh 'script'/'div' dict as str) or None.
    """
    img = None
    try:
        img = cache_obj['redis'].get(cache_obj['name'])
    except Exception:
        cache_obj['use_redis'] = False
    if cache_obj['use_redis']:
        if img is None:
            cache_obj['in_cache'] = False
        else:
            img = str(img, 'utf-8')
            cache_obj['in_cache'] = True
    return cache_obj, img
def get_ending(fit_format: int) -> str:
    """Get the file extension for a given fit format.

    Args:
        fit_format (int): .csv (0) or .pkl (1).

    Raises:
        Exception: If an invalid fit format is provided.

    Returns:
        str: The extension for the provided fit format.
    """
    endings = {0: '.csv', 1: '.pkl'}
    if fit_format not in endings:
        raise Exception("No such fit format: %s" % fit_format)
    return endings[fit_format]
def split(inp, n):
    """Split an input list into a list of sub-lists of length *n*.

    :param inp: input list; its length must be a multiple of n.
    :param n: chunk size.
    :raises ValueError: when len(inp) is not divisible by n.
    :return: list of chunks.

    Fixes: an empty input used to produce ``[[]]`` (one spurious empty
    chunk); it now yields ``[]``. The manual index/accumulator loop is
    replaced by slicing.
    """
    if len(inp) % n != 0:
        raise ValueError
    return [inp[start:start + n] for start in range(0, len(inp), n)]
def splitNotaries(lines):
    """Segment the txt file into chunks of information for one notary.

    Blank lines separate notaries.

    Bug fix: when the input did not end with a blank line, the final
    notary's accumulated lines were never appended; the trailing chunk
    is now flushed after the loop.

    Args:
        lines (list): lines from the txt file

    Returns:
        list: list of lists, each with lines for one notary
    """
    notaryLines = []
    notaryInfo = []
    for line in lines:
        if line == '':
            if notaryInfo:
                notaryLines.append(notaryInfo)
                notaryInfo = []
        else:
            notaryInfo.append(line)
    if notaryInfo:  # flush the last notary (no trailing blank line required)
        notaryLines.append(notaryInfo)
    return notaryLines
def greatest_common_divisor(a: int, b: int) -> int:
    """Greatest common divisor, via Euclid's algorithm.

    Euclid's Lemma: d divides a and b, if and only if d divides a-b and b.
    Two integers are co-prime when gcd(a, b) == 1.

    >>> greatest_common_divisor(7, 5)
    1
    >>> greatest_common_divisor(121, 11)
    11
    """
    larger, smaller = (b, a) if a < b else (a, b)
    while larger % smaller != 0:
        larger, smaller = smaller, larger % smaller
    return smaller
def right_diagonal_value(coord_x, coord_y, grid):
    """Product of 4 cells running down-right diagonally from (coord_x, coord_y).

    Returns 0 when any of the four cells is missing from the *grid* dict.
    """
    product = 1
    for step in range(4):
        try:
            product *= grid[(coord_x + step, coord_y + step)]
        except KeyError:
            return 0
    return product
def ntimes(string, char):
    """Count how many times *char* occurs in *string*."""
    occurrences = string.count(char)
    return occurrences
def make_generic_usage_message(doc):
    """Construct the generic usage error shown for an unknown option.

    :param doc: Usage documentation for program
    :type doc: str
    :returns: Generic usage error
    :rtype: str
    """
    message = 'Unknown option\n{}'.format(doc)
    return message
def anagram(string1, string2):
    """Return True when the two strings are anagrams of each other.

    Sorting both strings reduces the check to simple equality: equal
    sorted letter lists means the same multiset of characters.
    """
    return sorted(string1) == sorted(string2)
def GetHumanReadableDiskSize(size):
    """Returns a human readable string representation of the disk size.

    Bug fix: sizes of 1024 TB or more used to fall off the end of the
    loop and return None; they are now reported in PB.

    Args:
        size: Disk size represented as number of bytes.

    Returns:
        A human readable string representation of the disk size,
        e.g. '512.0 bytes' or '1.5 GB'.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if size < 1024.0:
            return '%3.1f %s' % (size, unit)
        size = float(size) / 1024.0
    return '%3.1f %s' % (size, 'PB')  # anything >= 1024 TB
def mutate(codon, alt, index):
    """Replace (mutate) a base in a codon with an alternate base.

    Parameters
    ----------
    codon : str
        three letter DNA sequence
    alt : str
        alternative base
    index : int
        index of the alt base in codon (0|1|2).

    Returns
    -------
    str
        codon with alternative base

    Raises
    ------
    AssertionError
        error if index is not valid (0|1|2)
    AssertionError
        error if base is not valid (A|T|C|G)
    """
    assert index in [0, 1, 2], "Not a valid index."
    assert alt in ["A", "T", "C", "G"], "Not a valid base."
    return codon[:index] + alt + codon[index + 1:]
def filter_urcline(string, filler=''):
    """Replace undesirable characters ('\\r' and NUL) in a urcline string with *filler*."""
    for unwanted in ('\r', '\x00'):
        string = string.replace(unwanted, filler)
    return string
def city_state(city, state, population=0):
    """Return "City, State", optionally suffixed with " - population N"."""
    parts = [city.title(), ', ', state.title()]
    if population:
        parts.append(' - population ' + str(population))
    return ''.join(parts)
def accuracy_evaluation_boolean_float(attribute, values_range):
    """Fraction of values in *attribute* that fall inside the admitted range.

    :param attribute: iterable of numeric values to evaluate (the
        original docstring wrongly documented a ``df`` parameter).
    :param values_range: range of admitted values, e.g.
        ``{"type": "float", "interval": {"min": 5, "max": 100}}``.
    :return: accuracy of the provided attribute, in [0, 1]. An empty
        input still raises ZeroDivisionError, as before.

    Idiom fix: the per-row check used bitwise ``&`` on comparison
    results; a chained comparison is the correct form.
    """
    low = values_range['interval']['min']
    high = values_range['interval']['max']
    matches = [low <= value <= high for value in attribute]
    return matches.count(True) / len(matches)
def _to_swarming_dimensions(dims):
    """Convert 'key:value' buildbucket dimension strings to swarming dicts.

    Only the first ':' splits, so values may themselves contain colons.
    """
    dimensions = []
    for dim in dims:
        key, value = dim.split(':', 1)
        dimensions.append({'key': key, 'value': value})
    return dimensions
def stripdesc(desc):
    """Strip trailing whitespace on each line, plus leading/trailing empty lines."""
    rstripped = [line.rstrip() for line in desc.splitlines()]
    return '\n'.join(rstripped).strip('\n')
def nrrdvector(inp):
    """Parse a parenthesized, comma-separated nrrd-header vector into a list of floats."""
    assert inp[0] == '(', "Vector should be enclosed by parenthesis."
    assert inp[-1] == ')', "Vector should be enclosed by parenthesis."
    body = inp[1:-1]
    return [float(component) for component in body.split(',')]
def check_entityname(entnamelist):
    """Replace disallowed characters in entity names with underscores.

    A name passes when, after removing all whitespace, it is purely
    alphanumeric; otherwise every character that is neither alphanumeric
    nor one of '-', '(', ')', ' ' becomes '_'. The input list is
    modified directly and also returned.

    Parameters
    ----------
    entnamelist: list

    Returns
    -------
    entnamelist: list
    """
    allowed = ['-', '(', ')', ' ']
    for idx in range(len(entnamelist)):
        compact = ''.join(entnamelist[idx].split())
        if compact.isalnum():
            continue
        entnamelist[idx] = ''.join(
            ch if (ch.isalnum() or ch in allowed) else '_'
            for ch in entnamelist[idx]
        )
    return entnamelist
def _process(user_input, mapping, extra=[]):
    """Process the *user_input* string through two replacement tables.

    Every (old, new) pair from *mapping* and then *extra* is applied via
    str.replace. A "mirror" built from the output characters belonging to
    *mapping* is then appended once for the short description and three
    times for the target.

    Note: the mutable default for *extra* is preserved for interface
    compatibility; it is only iterated, never mutated.

    Workaround context: Keypirinha's backend splits some output unicode
    chars (e.g. U+1D49C) in `item.target()` / `self.create_item()`;
    different encodings were tried without success, hence the mirrored
    duplication below.

    Returns:
        (target, short_desc) tuple.
    """
    target = user_input
    for symbol in mapping:
        target = target.replace(symbol[0], symbol[1])
    for symbol in extra:
        target = target.replace(symbol[0], symbol[1])
    mapped_outputs = [symbol[1] for symbol in mapping]
    mirror = ''.join(ch for ch in target if ch in mapped_outputs)
    short_desc = target + mirror
    target = target + mirror * 3
    return target, short_desc
def two_fer(name='you'):
    """Return the two-fer phrase for *name* (defaults to 'you').

    :returns: str
    """
    return "One for {}, one for me.".format(name)
def make_cup_links(cups):
    """Build a circular successor map: each cup points at the next, the last wraps to the first."""
    links = dict(zip(cups, cups[1:]))
    links[cups[-1]] = cups[0]
    return links
def add_html_hoover(text, hoover_text, text_color='', url=''):
    """Wrap *text* in an anchor whose title attribute shows *hoover_text* on mouseover.

    An (optionally empty) href keeps the element an anchor; *text_color*
    is folded into the inline style when given.
    """
    style = 'text-decoration: none'
    if text_color != '':
        style += '; color: ' + text_color
    return ('<a href="' + url + '" style="' + style + '" title="'
            + hoover_text + '">' + text + '</a>')
def get_normal_form(obj_name):
    """Transform an object name to title form.

    Example: risk_assessments -> Risk Assessments
    """
    spaced = obj_name.replace("_", " ")
    return spaced.title()
def filename(request):
    """Generate the export filename.

    *request* is accepted for the (currently disabled) search-query
    suffix; the commented-out code is kept for reference.
    """
    base_name = 'contacts-export'
    # q = request.GET.get('q')
    # if q:
    #     base_name = '%s_%s' % (base_name, q)
    return base_name
def proto2methodprotofunc(proto):
    """Sanitize a JNI prototype into an identifier-safe suffix.

    Converts e.g. 'Ljava/lang/String;' into 'Ljava_lang_String' so it can
    be appended to 'METHOD_myMethod' when the exported python name
    embeds the prototype.

    Idiom/performance fix: six chained ``str.replace`` passes are
    collapsed into a single ``str.translate`` pass over one table
    (spaces, '(', '[', ')' and ';' are deleted; '/' becomes '_').
    """
    table = str.maketrans({' ': None, '(': None, '[': None, ')': None,
                           ';': None, '/': '_'})
    return proto.translate(table)
def grompp_em(job):
    """Build the GROMACS grompp command line for the energy minimization step."""
    em_mdp_path = "em.mdp"
    return ("gmx grompp -f " + em_mdp_path
            + " -o em.tpr -c init.gro -p init.top --maxwarn 1")
def inc_key(text):
    """Increment the last character of *text* by one code point.

    Used to replace a key in a SQL LIKE case with a less-than
    comparison. Empty input is returned unchanged.
    """
    if not text:
        return text
    bumped = chr(ord(text[-1]) + 1)
    return text[:-1] + bumped
def check_result(result):
    """Return True when *result* is a dict carrying an 'error' key."""
    return isinstance(result, dict) and "error" in result
def hamming_distance(s1, s2):
    """Number of positions at which equal-length sequences s1 and s2 differ.

    (Algorithm from
    https://en.wikipedia.org/wiki/Hamming_distance#Algorithm_example)

    :raises ValueError: for sequences of unequal length.
    """
    if len(s1) != len(s2):
        raise ValueError("Undefined for sequences of unequal length")
    distance = 0
    for a, b in zip(s1, s2):
        if a != b:
            distance += 1
    return distance
def _err_msg(err, hostname, port):
    """Generate a template error message with host/port and troubleshooting hints.

    Produces e.g. ``ConnectionError. 192.168.1.50:55555.`` followed by a
    hint to check the Sublime settings / Nuke plugin connection.

    Args:
        err (str): Error message to add.
        hostname (str): hostname.
        port (str|int): port.

    Returns:
        str: the error message created.
    """
    header = "{}. {}:{}.\n".format(err, hostname, port)
    hints = (
        'Check Sublime settings if you specified manually the address,'
        ' or check if plugin inside Nuke is connected.'
    )
    return header + hints
def compute_top_k_accuracy(labels, predictions):
    """Accuracy of `predictions` with `labels` as ground truth.

    Args:
        labels: An iterable of ground truth labels.
        predictions: A matrix where each row contains k label predictions;
            the true label is in the corresponding entry of `labels`.

    Returns:
        float: The accuracy (0.0 for empty input).

    Raises:
        ValueError: If the number of `labels` does not equal the number of
            rows in `predictions`.
    """
    num_samples = len(labels)
    if num_samples == 0:
        return 0.
    if num_samples != len(predictions):
        raise ValueError("`labels` and `predictions` must have the same "
                         "number of entries")
    num_correct = sum(1 for truth, row in zip(labels, predictions) if truth in row)
    return num_correct / (1. * num_samples)
def fc_params(in_features: int, out_features: int, bias: bool = True):
    """Return the number of parameters in a linear (fully-connected) layer.

    Bug fix: a bias vector has one parameter per *output* unit, so the
    total is ``in*out + out = out*(in+1)``. The original computed
    ``in*(out+1) = in*out + in``, which is wrong whenever
    in_features != out_features.

    Args:
        in_features: Size of input vector.
        out_features: Size of output vector.
        bias: If true count bias too.

    Returns:
        The number of parameters.
    """
    weights = in_features * out_features
    return weights + out_features if bias else weights
def get_road_shiftlog_url(season, game):
    """URL of the road-team HTML shift report for the specified game.

    :param season: int, the season (starting year; the directory is
        '<season><season+1>').
    :param game: int, the game number.
    :return: str, e.g. http://www.nhl.com/scores/htmlreports/20072008/TV020001.HTM
    """
    report_dir = '{:d}{:d}'.format(season, season + 1)
    report_file = 'TV0{:d}.HTM'.format(game)
    return 'http://www.nhl.com/scores/htmlreports/' + report_dir + '/' + report_file
def filter_dictionary(dictionary, field_list):
    """Return a copy of *dictionary* restricted to the keys in *field_list*.

    :param dictionary: the dictionary to filter.
    :param field_list: list containing keys to keep.
    :return: dict with just the keys from the list.
    """
    return {key: dictionary[key] for key in dictionary if key in field_list}
def bleu(reference, candidate):
    """Modified-precision BLEU score of a candidate sentence.

    BLEU = m / w, where m is the number of candidate words found in the
    reference and w is the candidate length; a tiny epsilon stands in
    for w when the candidate is empty, avoiding division by zero.
    More information: https://en.wikipedia.org/wiki/BLEU

    Parameters
    ----------
    reference : array-like
        Words of the reference (ground-truth) sentence.
    candidate : array-like
        Words of the generated sentence to evaluate.

    Returns
    -------
    bleu : float
        BLEU score

    Examples
    --------
    >>> bleu(['test', 'basic'], ['test', 'add'])
    0.5
    """
    reference_words = set(reference)
    matched = len([word for word in candidate if word in reference_words])
    total = len(candidate)
    if total == 0:
        total = 0.000000001
    return matched / total
def _truncate_lines(infile, line, c): """Replace long sequences of single characters with a shorter version. """ if set(line) == set(c) and len(line) > 64: line = c * 64 return line
def linear_decay_learning_rate(step, total_train_steps, initial_lr=0.1, offset=0):
    """Linearly decay the learning rate to 0.

    Args:
      step: a tf.scalar representing the step we want the learning rate for.
      total_train_steps: a number, the total number of training steps.
      initial_lr: initial learning rate. Decays from here.
      offset: a number used for finetuning. Starts the learning-rate decay
        schedule from this step forwards.

    Returns:
      a tf.Scalar, the learning rate for the step.
    """
    # Before the offset the rate is held constant at its initial value.
    if step < offset:
        return initial_lr
    decay_per_step = initial_lr / float(total_train_steps - offset)
    steps_into_decay = step - offset
    return initial_lr - decay_per_step * steps_into_decay
def escapeXMLChars(text):
    """Escape the characters that must be replaced to obtain well-formed XML.

    Only '&' and '<' are substituted — in that order, so the ampersands
    introduced by the '<' replacement are not double-escaped.  Quotes and
    '>' are deliberately left untouched, matching the original behaviour.

    Args:
        text: The text that will be escaped (string)

    Returns:
        text: new text containing XML entities instead of characters (string)
    """
    for raw, entity in (("&", "&amp;"), ("<", "&lt;")):
        text = text.replace(raw, entity)
    return text
def type_to_node(type_name):
    """
    Convert an Avro type name (with dots) to a GraphViz node identifier.
    """
    # Escape pre-existing underscores first (doubling them) so that, after
    # dots are mapped onto single underscores, distinct names cannot collide.
    escaped = type_name.replace("_", "__")
    return escaped.replace(".", "_")
def lum_double_power_law(lum, phi_star, lum_star, alpha, beta):
    """Evaluate a broken double power law luminosity function as a function
    of luminosity.

    :param lum: Luminosity
    :type lum: float or np.ndarray
    :param phi_star: Normalization of the broken power law at a value of lum_star
    :type phi_star: float
    :param lum_star: Break luminosity of the power law
    :type lum_star: float
    :param alpha: First slope of the broken power law
    :type alpha: float
    :param beta: Second slope of the broken power law
    :type beta: float
    :return: Value of the broken double power law at a magnitude of M
    :rtype: float or np.ndarray
    """
    ratio = lum / lum_star
    return phi_star / (ratio ** alpha + ratio ** beta)
def names_match(rankings, param_names):
    """
    Convert partial-order results from parameter indices to parameter names.

    rankings: dict, partial sort results keyed by group, values are lists
        of integer indices
    param_names: sequence containing the names of the parameters

    Returns a dict with the same keys whose values are the corresponding
    parameter names.
    """
    return {
        group: [param_names[index] for index in indices]
        for group, indices in rankings.items()
    }
def tier_from_site_name(s):
    """
    Return the tier of a site name: the part before the first '_'
    (underscore) separator.
    """
    return s.split('_')[0]
def _build_arguments(keyword_args): """ Builds a dictionary of function arguments appropriate to the index to be computed. :param dict keyword_args: :return: dictionary of arguments keyed with names expected by the corresponding index computation function """ function_arguments = { "data_start_year": keyword_args["data_start_year"], "scale": keyword_args["scale"], "distribution": keyword_args["distribution"], "calibration_year_initial": keyword_args["calibration_start_year"], "calibration_year_final": keyword_args["calibration_end_year"], "periodicity": keyword_args["periodicity"], } return function_arguments
def getSubsetTuples(dim_dict, subset_indices):
    """
    Return a list of (label, level) tuples representing the subset of
    dim_dict selected by subset_indices.

    Inputs:
        dim_dict (dict): a dictionary of (key, value) = (label for the
            level, list of indices for the level).  Most often comes from
            one element in the dict returned by getLevels() in the
            Schema_schemaname.py files
        subset_indices (list): a list of integers representing the levels
            to keep as part of the subset

    Outputs:
        a list of tuples (label, level), in the order of subset_indices

    Notes:
        Easier and more robust than copying large lists of (label, level)
        pairs by hand.  At most one match is taken per index.
    """
    pairs = list(dim_dict.items())
    subset = []
    for target in subset_indices:
        match = next(
            ((label, level) for label, level in pairs if level[0] == target),
            None,
        )
        if match is not None:
            subset.append(match)
    return subset
def paragraph_tokenizer(text, delimiter='\n\n'):
    """Break a text down into its paragraphs.

    Keyword arguments:
    text -- given text
    delimiter -- separator between paragraphs, default value is '\\n\\n'
    """
    return text.split(delimiter)
def str_to_bool(string: str = 'False') -> bool:
    """Smart conversion from str (or bool) to bool.

    Args:
        string: value to interpret.  Recognised truthy strings are
            'true', 'True', '1', 'on', 'yes'; falsy strings are
            'false', 'False', '0', 'off', 'no'.  An actual bool is
            passed through unchanged.  Defaults to 'False', so calling
            with no argument returns False.

    Returns:
        The parsed boolean.

    Raises:
        ValueError: if the value matches neither list.
    """
    true_values = ['true', 'True', '1', 'on', 'yes']
    false_values = ['false', 'False', '0', 'off', 'no']
    # The `== True` / `== False` comparisons accept genuine bools in
    # addition to their string spellings.
    if string in true_values or string == True:
        return True
    if string in false_values or string == False:
        return False
    # Fixed typo in user-facing message: "ambigious" -> "ambiguous".
    raise ValueError(f"'{string}' is ambiguous")
def filter_long_docs(src, tgt, max_doc_len):
    """Filter out documents longer than max_doc_len together with their
    targets.  Returns two lists (kept sources, kept targets)."""
    kept_pairs = [
        (doc, target)
        for doc, target in zip(src, tgt)
        if len(doc) <= max_doc_len
    ]
    if not kept_pairs:
        return [], []
    kept_src, kept_tgt = zip(*kept_pairs)
    return list(kept_src), list(kept_tgt)
def binary_search(items, desired_item, start=0, end=None,):
    """Standard binary search, written iteratively.

    Parameters:
        items: a sorted list
        desired_item: the value to look for
        start: int, index of the start of the search section
        end: exclusive end boundary of the search section

    Returns:
        None if desired_item is not found in items,
        otherwise the index position of desired_item.
    """
    lo = start
    hi = len(items) if end is None else end
    # Probe sequence is identical to the recursive formulation:
    # mid = (hi - lo) // 2 + lo, then recurse into [mid+1, hi) or [lo, mid).
    while lo != hi:
        mid = (hi - lo) // 2 + lo
        if items[mid] == desired_item:
            return mid
        if desired_item > items[mid]:
            lo = mid + 1
        else:
            hi = mid
    return None
def form_metaquast_cmd_list(metaquast_fp, outdir, input_fasta):
    """Format arguments received to generate the list to be used for the
    metaquast subprocess call.

    Args:
        metaquast_fp(str): path to the metaquast executable
        outdir(str): path to the output directory
        input_fasta(list): list of fasta files for the metaquast analysis

    Returns:
        call_args_list(list): the sequence of commands to be passed to
            the metaquast call

    Raises:
        ValueError: if metaquast_fp is empty/None, outdir is None, or
            input_fasta is empty.
    """
    # Bug fix: the original used `metaquast_fp is ''`, an identity
    # comparison with a string literal that depends on interning and
    # raises a SyntaxWarning on Python >= 3.8; use equality/None checks.
    if metaquast_fp is None or metaquast_fp == '':
        raise ValueError('metaquast_path invalid. metaquast_path name is empty')
    if outdir is None:
        raise ValueError('outdir location invalid. outdir is None')
    if not input_fasta:
        raise ValueError('input contigs invalid. no fasta files specified')

    # required arguments
    call_args_list = ['python2', metaquast_fp]
    # add the fasta files
    call_args_list.extend(input_fasta)
    # add the output directory
    call_args_list.extend(['-o', outdir])
    return call_args_list
def remove_numbers(tokens_list: list) -> list:
    """Remove purely-numeric tokens from each sub-list of tokens.

    Args:
        tokens_list (list): list of token lists

    Returns:
        list: the same structure with digit-only tokens removed
    """
    return [
        [token for token in sublist if not token.isdigit()]
        for sublist in tokens_list
    ]
def get_geo_coordinates(tweet):
    """
    Get the user's geo coordinates, if they are included in the payload
    (otherwise return None)

    Args:
        tweet (Tweet or dict): A Tweet object or dictionary

    Returns:
        dict: dictionary with the keys "latitude" and "longitude"
              or, if unavaiable, None

    Example:
        >>> tweet_geo = {"geo": {"coordinates": [1,-1]}}
        >>> get_geo_coordinates(tweet_geo)
        {'latitude': 1, 'longitude': -1}

        >>> tweet_no_geo = {"geo": {}}
        >>> get_geo_coordinates(tweet_no_geo)  # returns None
    """
    # Guard-clause style: bail out as soon as any piece is missing.
    if "geo" not in tweet:
        return None
    geo = tweet["geo"]
    if geo is None or "coordinates" not in geo:
        return None
    lat, lon = geo["coordinates"]
    return {"latitude": lat, "longitude": lon}