content
stringlengths
42
6.51k
def _len_stats(sequences): """ Return the minimum, mean and maximum sequence length. >>> _len_stats(['A', 'AA', 'AAA']) (1, 2.0, 3) >>> _len_stats([]) (0, 0.0, 0) """ if not sequences: return 0, 0.0, 0 lengths = [len(seq) for seq in sequences] return min(lengths), sum(lengths) / len(lengths), max(lengths)
def obtain_ATC_levels(ATCs):
    """Group ATC codes into their 5 hierarchy levels.

    For 'A10BA02' the prefixes are: 'A' (level1), 'A10' (level2),
    'A10B' (level3), 'A10BA' (level4), 'A10BA02' (level5).
    """
    # Prefix length that identifies each of the first four levels.
    prefix_lengths = (('level1', 1), ('level2', 3), ('level3', 4), ('level4', 5))
    level_to_ATCs = {'level1': [], 'level2': [], 'level3': [],
                     'level4': [], 'level5': []}
    for code in ATCs:
        for level, n in prefix_lengths:
            level_to_ATCs[level].append(code[:n])
        level_to_ATCs['level5'].append(code)
    return level_to_ATCs
def human_to_real(iops):
    """Convert a human-readable IOPs string (e.g. '2K', '30M') to a number.

    Returns 0 if the argument has an unexpected form.  Fixes two defects
    of the original: an empty string raised IndexError instead of
    returning 0, and a plain digit string (e.g. '500') was returned as a
    str rather than a number.

    :param iops: IOPs string such as '2K', '30M' or '500'
    :return: the numeric value, or 0 on bad input
    """
    if not iops:
        return 0
    digits, unit = iops[:-1], iops[-1].upper()
    if unit.isdigit():
        # No unit suffix: the whole string must be numeric.
        return int(iops) if iops.isdigit() else 0
    if not digits.isdigit():
        return 0
    value = int(digits)
    if unit == 'M':
        return value * 1000000
    if unit == 'K':
        return value * 1000
    return 0
def get_variable_name(fname):
    """Extract the variable name encoded in a file path.

    The path's basename is split on underscores; the first piece that is
    a substring of any known variable name ('demand', 'solarfarm',
    'railsplitter') is returned.  Returns None when nothing matches.
    """
    known_variables = ['demand', 'solarfarm', 'railsplitter']
    basename = fname.split('/')[-1]
    for piece in basename.split('_'):
        if any(piece in candidate for candidate in known_variables):
            return piece
    return None
def default(languages: dict, attr: str, **kwargs) -> dict:
    """Build a {language: translation} mapping by calling each *attr* factory.

    :param languages: mapping of language -> {attr: callable}
    :param attr: key selecting the callable in each translation mapping
    :param kwargs: forwarded to each callable
    :return: mapping of language -> call result
    """
    result = {}
    for language, translate in languages.items():
        result[language] = translate[attr](**kwargs)
    return result
def gammapoisson_var(a, b):
    """Variance of a gamma-Poisson compound r.v. (shape-rate parameterization).

    f(k; a, b) = int_0^inf Poisson(k; L) * Gamma(L; a, b) dL

    This is a negative binomial with p = 1 / (b + 1) and r = a, whose
    variance p*r / (1 - p)^2 simplifies to a * (b + 1) / b^2.

    :param a: shape parameter (a > 0)
    :param b: rate parameter (b > 0)
    :return: the variance
    """
    numerator = a * (b + 1)
    return numerator / (b ** 2)
def replace_database_in_url(url, database_name):
    """Swap the trailing database segment of *url* for *database_name*.

    Example: replace_database_in_url('foo/db1', 'db2') returns 'foo/db2'.
    Not suitable for unix domain socket connection strings.
    """
    last_slash = url.rfind("/")
    prefix = url[:last_slash]
    return prefix + "/" + database_name
def clash(flower1, flower2):
    """Test whether two flowers will clash at any point of their lives.

    Classic closed-interval intersection test on [bloom, wilt], where
    bloom is at index 1 and wilt at index 2 of each flower.
    """
    first_starts_before_second_ends = flower1[1] <= flower2[2]
    second_starts_before_first_ends = flower2[1] <= flower1[2]
    return first_starts_before_second_ends and second_starts_before_first_ends
def safe_equals(left: object, right: object) -> bool:
    """Compare two objects for equality, returning False if __eq__ raises."""
    try:
        outcome = left == right
    except Exception:
        return False
    return bool(outcome)
def string_of_spaces(n):
    """Build a run of *n* HTML non-breaking spaces.

    :param n {int}: number of spaces
    """
    nbsp = "&nbsp;"
    return nbsp * n
def getDict(symbolDictList):
    """Collect solveEQ solutions into one dict of value lists.

    Parameters
    ----------
    symbolDictList
        list of solution dicts from solveEQ

    Returns
    -------
    dict mapping str(symbol) -> list of values gathered across all
    solution dicts, in input order.
    """
    stringDict = {}
    for solution in symbolDictList:
        for symbol, value in solution.items():
            stringDict.setdefault(str(symbol), []).append(value)
    return stringDict
def decode_args(args, stdin_encoding):
    """Decode every bytes argument to str using *stdin_encoding*.

    Non-bytes arguments pass through unchanged.  Uses isinstance instead
    of the original ``type(arg) == bytes`` so bytes subclasses are also
    decoded (idiomatic type check).
    """
    return [
        arg.decode(stdin_encoding) if isinstance(arg, bytes) else arg
        for arg in args
    ]
def to_bool(value):
    """Convert *value* to a boolean; raise on unrecognized input.

    True values:  1, True, "1", "TRue", "yes", "y", "t", "on"
    False values: 0, False, None, [], {}, "", "0", "faLse", "no", "n",
                  "f", 0.0, "off", ...
    """
    text = str(value).lower()
    if text in ("on", "yes", "y", "true", "t", "1"):
        return True
    if text in ("off", "no", "n", "false", "f", "0", "0.0", "",
                "none", "[]", "{}"):
        return False
    raise Exception('Invalid value for boolean conversion: ' + str(value))
def list_check_equal(input_list):
    """Check that all items of the input list are equal.

    :param input_list: input list
    :type input_list: list
    :return: True when every item equals its neighbor (trivially True
        for lists shorter than 2), else False
    """
    tail = input_list[1:]
    head = input_list[:-1]
    return tail == head
def eksponen(a: int, n: int) -> int:
    """Evaluate a ** n by divide and conquer in O(log n) time.

    Recurrence:
        a^n = (a^(n//2))^2          if n is even
        a^n = (a^((n-1)//2))^2 * a  if n is odd

    Example: 3^5 = (3^2) * (3^2) * 3.

    Uses floor division (``n // 2``); the original ``int(n / 2)`` goes
    through float division and loses precision for n > 2**53.

    >>> eksponen(3, 3)
    27
    >>> eksponen(3, 2.5)
    Traceback (most recent call last):
        ...
    ValueError: Pangkat negatif atau pecahan

    Raises:
        ValueError: when n is negative or not an integer.
    """
    # Reject negative or fractional exponents.
    if not isinstance(n, int) or n < 0:
        raise ValueError("Pangkat negatif atau pecahan")
    if n == 0:
        # Base case: a^0 == 1.
        return 1
    half = eksponen(a, n // 2)
    return half * half if n % 2 == 0 else half * half * a
def _region(region): """ Return the region argument. """ return " --region {r}".format(r=region)
def pick_term(xp: int, yp: int, zp: int, option: str) -> int:
    """Resolve an axis option to its value from the tuple (xp, yp, zp).

    *option* is one of 'x', 'y', 'z', '-x', '-y', '-z'; a leading '-'
    negates the component.
    """
    values = dict(zip(('x', 'y', 'z'), (xp, yp, zp)))
    values.update({'-' + axis: -component for axis, component in values.items()})
    return values[option]
def validate_page(page):
    """Coerce *page* to an int page number, defaulting to 1 on bad input."""
    try:
        return int(page)
    except (ValueError, TypeError):
        return 1
def parseVersion(strversion):
    """Parse "Protocol '/' Major '.' Minor" strings, e.g. 'HTTP/1.1'.

    Returns (protocol_lowercased, major, minor).  Raises ValueError on
    bad syntax or negative version numbers.
    """
    proto, numbers = strversion.split('/')
    major_text, minor_text = numbers.split('.')
    major = int(major_text)
    minor = int(minor_text)
    if major < 0 or minor < 0:
        raise ValueError("negative number")
    return (proto.lower(), major, minor)
def total_rain(rain_hours: list) -> float:
    """Sum the '1h' rain amounts (mm) across a list of rain-hour dicts.

    Each entry is a one-key dict whose value is a mapping containing a
    '1h' rainfall amount.  Returns 0 for an empty list.  The return
    annotation is corrected to float: ``round(x, 2)`` yields a float for
    float input, so the original ``-> int`` was wrong.
    """
    if not rain_hours:
        return 0
    return round(sum(list(hour.values())[0]['1h'] for hour in rain_hours), 2)
def update_shape(obs_shape, act_shape, rew_shape, wrapper_names):
    """
    Overview:
        Get new shapes of observation, action, and reward given the wrappers.
    Arguments:
        obs_shape (:obj:`Any`), act_shape (:obj:`Any`), rew_shape (:obj:`Any`),
        wrapper_names (:obj:`Any`)
    Returns:
        obs_shape (:obj:`Any`), act_shape (:obj:`Any`), rew_shape (:obj:`Any`)
    """
    for name in wrapper_names:
        if not name:
            continue
        try:
            # NOTE(review): eval() resolves the wrapper class by name in this
            # module's namespace -- only trusted wrapper names must reach here.
            wrapper_cls = eval(name)
            obs_shape, act_shape, rew_shape = wrapper_cls.new_shape(
                obs_shape, act_shape, rew_shape)
        except Exception:
            # Wrappers without a new_shape (or unknown names) are skipped.
            continue
    return obs_shape, act_shape, rew_shape
def jacobi_radius(r, M_host, M_sat):
    """Jacobi radius of a satellite on a circular orbit about an extended host.

    Assumes the host is well modeled as an isothermal sphere halo:

        R_j = r * (M_sat / (2 M_host(<r)))**(1/3)

    For MW/LMC the isothermal-sphere approximation is reasonable within
    50 kpc.  Also known as the Roche radius/limit or Hill radius.

    Args:
        r: distance between satellite and host (kpc)
        M_host: host mass enclosed within r (M_sun)
        M_sat: satellite mass (M_sun)
    Returns:
        Jacobi radius (kpc)
    """
    mass_ratio = M_sat / (2 * M_host)
    return r * mass_ratio ** (1 / 3)
def first_word(text: str) -> str:
    """Return the first word of *text*.

    Periods and commas are treated as word separators.  Removes the
    original's unreachable dead code (an ``import re`` and a second
    ``return`` after the first return statement, which could never run).

    Raises IndexError if the text contains no words.
    """
    # Normalize '.' and ',' to spaces so they also split words.
    cleaned = text.replace('.', ' ').replace(',', ' ')
    return cleaned.split()[0]
def _clean(telegram_text): """Remove markdown characters to prevent Telegram parser to fail ('_' & '*' chars).""" return telegram_text.replace('_', '\_').replace('*', '')
def from_rgb(rgb):
    """Translate an (r, g, b) int tuple to a tkinter-friendly '#rrggbb' code."""
    red, green, blue = rgb
    channels = format(red, '02x') + format(green, '02x') + format(blue, '02x')
    return '#' + channels
def GetTechJobsAds(adList):
    """Filter out all tech jobs (occupation field 'Data/IT') from *adList*."""
    return [ad for ad in adList
            if ad['occupation_field']['label'] == 'Data/IT']
def filter_current_symbol(view, point, symbol, locations):
    """
    Filter the point specified from the list of symbol locations. This results
    in a nicer user experience so the current symbol doesn't pop up when
    hovering over a class definition. We don't just skip all class and function
    definitions for the sake of languages that split the definition and
    implementation.
    """

    def match_view(path, view):
        # True when `path` refers to the same buffer as `view`.
        fname = view.file_name()
        if fname is None:
            # Unsaved buffer: match by view id via the '<untitled N>' path.
            if path.startswith('<untitled '):
                path_view = view.window().find_open_file(path)
                return path_view and path_view.id() == view.id()
            return False
        return path == fname

    new_locations = []
    for l in locations:
        if match_view(l[0], view):
            # l[2] is a (row, col) pair, 1-based row; convert to a point.
            symbol_begin_pt = view.text_point(l[2][0] - 1, l[2][1])
            symbol_end_pt = symbol_begin_pt + len(symbol)
            # Drop the location the cursor is currently inside.
            if point >= symbol_begin_pt and point <= symbol_end_pt:
                continue
        new_locations.append(l)
    return new_locations
def get_new_field_item(field_update):
    """Extract (field_name, new_value) from a field_update.

    A field_update is a pair of (name, (old_value, new_value)); only the
    new value is kept.
    """
    field_name = field_update[0]
    new_value = field_update[1][1]
    return (field_name, new_value)
def _create_fieldname_to_type_map(flat_dict_list): """ This method will create a map of file names to the respective data types this is used so we can put type hints in the resultant CSV For now this will only populate integer fields Parameters: flat_dict_list(in) -- This is a list of dictionaries that are already flattened into full path:value """ fieldname_to_type = {} for flat_dict in flat_dict_list: for key, value in flat_dict.items(): # we will only keep a type if all instances are identical # otherwise leave it to the default string if key in fieldname_to_type: existing_field_type = fieldname_to_type[key] if existing_field_type != 'BAD' and existing_field_type != type(value): # MARK the mapping BAD # previously found type doesn't match newly found type mark bad so # we don't cast it into a value that won't work (leave as a string) fieldname_to_type[key] == 'BAD' # otherwise it matches elif type(value) == int: #doing it like this in case we add more types later # add a new mapping fieldname_to_type[key] = 'int' elif type(value) == bool: fieldname_to_type[key] = 'bool' return fieldname_to_type
def _capitalize(s): """Capitalize first letter. Can't use built-in capitalize function because it lowercases any chars after the first one. In this case, we want to leave the case of the rest of the chars the same. """ return s[0].upper() + s[1:]
def PDFObjHasType(o, ty):
    """Return True if *o*, a PDF Object (type-tagged tuple), has type *ty*.

    None objects never match.
    """
    return o is not None and o[0] == ty
def build_dot_value(key, value):
    """Expand a dot-notation *key* into nested dicts holding *value*.

    For key 'x.y.z' and value 'foo' the result is ('x', {'y': {'z': 'foo'}}).

    Args:
        key (str): The key to build a dictionary off of.
        value: The value associated with the dot notation key.

    Returns:
        tuple: 2-tuple of the outermost (left-most) key and the
        constructed nested value for it.
    """
    # No nesting: the key/value pair is already final.
    if '.' not in key:
        return key, value
    outer, *inner = key.split('.')
    nested = value
    # Wrap the value inside one dict per remaining key part, inner-first.
    for part in reversed(inner):
        nested = {part: nested}
    return outer, nested
def fill_list_to_length(x, l, v):
    """Pad list *x* in place with value *v* until it reaches length *l*.

    No-op when len(x) >= l.  Returns the same list object.
    """
    missing = l - len(x)
    if missing > 0:
        x += [v] * missing
    return x
def null_data_cleaner(original_data: dict, data: dict) -> dict:
    """Copy *data* into *original_data*, blanking "null" placeholder values.

    Values equal to the string "null" (added during the option flow) are
    stored as empty strings.  Mutates and returns *original_data*.
    """
    for key, value in data.items():
        original_data[key] = "" if value == "null" else value
    return original_data
def ascii_to_hex(exception):
    """Codec error handler replacing undecodable bytes with hex escapes.

    On a UnicodeDecodeError, each offending byte is rendered as a
    '\\xNN' escape and decoding resumes after the bad range; any other
    exception is re-raised.
    """
    if not isinstance(exception, UnicodeDecodeError):
        raise exception
    bad_slice = exception.object[exception.start:exception.end]
    escaped = []
    for item in bad_slice:
        if isinstance(item, str):
            # Python 2: slicing a str yields one-char strings.
            escaped.append(u"\\x{}".format(item.encode("hex")))
        elif isinstance(item, int):
            # Python 3: iterating bytes yields ints; hex() -> '0xNN'.
            escaped.append(u"\\{}".format(hex(item)[1:]))
        else:
            raise exception
    return ("".join(escaped), exception.end)
def xround(x, divisor=1):
    """Round *x* to the closest multiple of *divisor*.

    Parameters
    ----------
    x : float
        Number to round.
    divisor : float
        Number the result shall be a multiple of.

    Returns
    -------
    float
        `x` rounded to the closest multiple of `divisor`.
    """
    multiples = round(x / divisor)
    return divisor * multiples
def problem_has_boolean_output(tables):
    """True when every truth-table output key is boolean-like (bool, 0 or 1).

    :param tables: all truth tables, keyed by output value.
    :return: whether the problem is boolean.
    """
    return all(isinstance(key, bool) or key in (0, 1) for key in tables)
def swapWordCount(wordToFreq):
    """Invert a word->count dict into a count->sorted word list dict.

    wordToFreq: dict linking each word to its count
    Returns: dict linking each count to its alphabetically sorted words.
    """
    freqToWord = {}
    for word, freq in wordToFreq.items():
        freqToWord.setdefault(freq, []).append(word)
    return {freq: sorted(words) for freq, words in freqToWord.items()}
def _derived_class(cls, base_class): """ Only matches subclasses that are not equal to the base class. """ return cls is not base_class and issubclass(cls, base_class)
def split_lines(s):
    """Split *s* into lines, each carrying a trailing newline.

    Concatenating the result reproduces s, possibly with a single
    appended newline.
    """
    pieces = s.split('\n')
    return ['{}\n'.format(piece) for piece in pieces]
def company_alias_generator(string: str) -> str:
    """Alias generator for the Company class fields.

    Aliases are "company" + variable name, except for the special field
    `validate_account_information`, which is passed through unchanged.

    Args:
        string (str): The variable name / dictionary key of the class.

    Returns:
        str: "company" + the given string (or the string itself).

    Example:
        >>> assert company_alias_generator("name") == "companyname"
    """
    if string == "validate_account_information":
        return string
    return "company" + string
def getattr_(entity, attribute):
    """Fetch *attribute* from *entity*, mapping over lists.

    Returns None when entity is None or an empty list; a list of
    attribute values when entity is a list; otherwise the attribute of
    the single entity.
    """
    if entity is None or entity == []:
        return None
    if isinstance(entity, list):
        return [getattr(member, attribute) for member in entity]
    return getattr(entity, attribute)
def convert_to_list(obj):
    """Return *obj* as a list.

    Tuples and lists become lists; None becomes the empty list; anything
    else is wrapped in a single-item list.

    Bug fixed: the original tested ``type(obj) is None``, which is never
    true (type() returns NoneType, not None), so None was wrapped as
    [None] instead of yielding [].
    """
    if obj is None:
        return []  # None implies empty list
    if type(obj) is list:
        return obj
    if type(obj) is tuple:
        return list(obj)
    return [obj]
def rgb2hex(rgbl):
    """Return the hexadecimal XML form of an RGB colour tuple.

    The XML consumer parses channels as bgr rather than standard rgb, so
    the tuple order is reversed on output; 'ff' is the fixed alpha
    prefix.
    """
    blue = int(rgbl[2])
    green = int(rgbl[1])
    red = int(rgbl[0])
    return f"ff{blue:02x}{green:02x}{red:02x}"
def unit_coordinates_to_image_coordinates(y_current, x_current, center, height, width):
    """Scale unit [0, 1] coordinates up to pixel coordinates (y, x order).

    y spans and the center row are scaled by *height*; x spans and the
    center column by *width*.  *center* is modified in place, matching
    the original behavior.

    Bug fixed: center[1] (the x coordinate) was scaled by *height*
    instead of *width*, inconsistent with the x_current scaling above.
    """
    y_current = [[y0 * height, y1 * height] for y0, y1 in y_current]
    x_current = [[x0 * width, x1 * width] for x0, x1 in x_current]
    center[0] = center[0] * height
    center[1] = center[1] * width  # was erroneously scaled by height
    return y_current, x_current, center
def bool_value(value):
    """Convert the given object to a bool if possible.

    Parameters
    ----------
    value : object
        Object to be converted to bool.  Real bools pass through;
        objects with a ``render()`` method are rendered first; the
        strings 'true'/'1' and 'false'/'0' map to True/False.

    Returns
    -------
    bool

    Raises
    ------
    ValueError
        If the object cannot be converted to bool.
    """
    if isinstance(value, bool):
        return value
    if hasattr(value, 'render'):
        value = value.render()
    truthy = ('true', '1')
    falsy = ('false', '0')
    if value in truthy:
        return True
    if value in falsy:
        return False
    raise ValueError('Invalid bool value: {!r}'.format(value))
def reorder_circle(circle):
    """Rotate/reflect a digit circle into canonical form.

    The result starts at the lowest element and proceeds toward the
    smaller of its two neighbors.  For example, "4736201" becomes
    "0147362" and "32187654" becomes "12345678".
    """
    size = len(circle)
    assert size == len(set(circle)), "Not all elements of the circle are unique!"
    smallest = min(circle, key=int)
    start = circle.index(smallest)
    left_neighbor = int(circle[start - 1])
    right_neighbor = int(circle[(start + 1) % size])
    if left_neighbor > right_neighbor:
        # Keep the original direction of travel.
        result = smallest + circle[start + 1:] + circle[:start]
    else:
        # Walk the circle in the reverse direction.
        result = smallest + circle[:start][::-1] + circle[start + 1:][::-1]
    assert len(result) == len(circle), "Wrong length of the new circle!"
    assert set(result) == set(circle), "Wrong elements in the new circle!"
    return result
def skip_add(n):
    """Return n + (n-2) + (n-4) + ... + 0, computed recursively (no loops).

    >>> skip_add(5)  # 5 + 3 + 1 + 0
    9
    >>> skip_add(10)  # 10 + 8 + 6 + 4 + 2 + 0
    30
    """
    if n == 0 or n == 1:
        return n
    return n + skip_add(n - 2)
def get_scaled_size(bytes, suffix="B"):
    """Scale a byte count to a human-readable string.

    e.g. 1253656 -> '1.20MB', 1253656678 -> '1.17GB'.

    Credit to PythonCode for this function:
    https://www.thepythoncode.com/article/get-hardware-system-information-python
    """
    step = 1024
    amount = bytes
    for prefix in ("", "K", "M", "G", "T", "P"):
        if amount < step:
            return f"{amount:.2f}{prefix}{suffix}"
        amount /= step
def convert_to_abmag(value, name):
    """Convert a catalog magnitude to AB magnitude by adding the band offset.

    Parameters
    ----------
    value : float
        Value of the band.
    name : str
        Name of the band as stated in the GSC column name.  Options are:
        2MASS: tmassJMag, tmassHMag, tmassKsMag
        SDSS: SDSSgMag, SDSSiMag, SDSSzMag
        GSC: JpgMag, FpgMag, IpgMag
    """
    offsets = {
        'tmassJMag': 0.90,
        'tmassHMag': 1.37,
        'tmassKsMag': 1.85,
        'SDSSuMag': 0.0,
        'SDSSgMag': 0.0,
        'SDSSrMag': 0.0,
        'SDSSiMag': 0.0,
        'SDSSzMag': 0.0,
        'JpgMag': -0.055,
        'FpgMag': 0.24,
        'NpgMag': 0.48,
    }
    return value + offsets[name]
def sort_by_dependency(names, images):
    """Order image *names* so that dependent images come last.

    An image whose 'from' image is still queued is pushed to the back of
    the queue.  Names without a definition in *images* are dropped, as in
    the original.  *names* is consumed (mutated) as before.

    Bug fixed: a circular 'from' chain (a from b, b from a) made the
    original loop forever.  A stall counter now detects when every
    remaining name defers on another and flushes the remainder in queue
    order.
    """
    ordered = []
    deferrals = 0  # consecutive re-queues without emitting anything
    while names:
        if deferrals > len(names):
            # Circular dependency: every remaining name waits on another.
            ordered.extend(name for name in names if name in images)
            names.clear()
            break
        name = names.pop(0)  # take the first queued image name
        if name not in images:
            continue
        definition = images[name]
        if 'from' in definition and definition['from'] in names:
            # Its base image is still queued: defer this one.
            names.append(name)
            deferrals += 1
        else:
            ordered.append(name)
            deferrals = 0
    return ordered
def get_strip_strings_array(strings):
    """Split a comma-separated string into trimmed, non-empty parts.

    example: 'HMC, V7000 ' -> ['HMC', 'V7000']

    args:
        strings: comma separated string list
    returns:
        string[] list of strings
    """
    return [part.strip() for part in strings.split(',') if part.strip()]
def lam2f(l):
    """Compute the photon frequency in Hz from wavelength (f = c / lambda).

    Parameters
    ----------
    l : float
        Photon wavelength in m.

    Returns
    -------
    f : float
        Frequency in Hz.
    """
    SPEED_OF_LIGHT = 299792458  # m/s
    return SPEED_OF_LIGHT / l
def evaluate_distinct(seed_set_cascades):
    """Count distinct nodes in the test cascades started from the seed set.

    :param seed_set_cascades: mapping of seed -> iterable of cascades,
        each cascade an iterable of nodes
    :return: the number of distinct nodes

    Uses in-place ``set.update`` instead of rebuilding the union set for
    every cascade (the original ``combined = combined.union(j)`` was
    accidentally quadratic).
    """
    combined = set()
    for cascades in seed_set_cascades.values():
        for cascade in cascades:
            combined.update(cascade)
    return len(combined)
def has_spdx_text_in_scancode_output(scancode_output_data_file_licenses):
    """True if at least one detected license's matched rule id mentions 'spdx'."""
    for detected_license in scancode_output_data_file_licenses:
        if 'spdx' in detected_license['matched_rule']['identifier']:
            return True
    return False
def unshared_copy(inList):
    """Deep-copy a (possibly nested) multi-dimensional list.

    Non-list values are returned as-is; nested lists are copied
    recursively.  (From http://stackoverflow.com/a/1601774)
    """
    if not isinstance(inList, list):
        return inList
    return [unshared_copy(element) for element in inList]
def convert_tvars_to_dict(tvars, name2nparr=None): """ convert the input training variables to a dictionary :param tvars: the tensorflow trainable variables :param name2nparr: a dictionary in same format with output, if given, the converted tvars will be added to this :return: the dictionary in the format of {dict: np.Array} """ if not name2nparr: name2nparr = {} name2nparr.update({var.name: var.numpy() for var in tvars}) return name2nparr
def df_if_two_one(value):
    """Collapse duplicated 'A A' record values to a single 'A'.

    Final data-cleaning helper run against station, latitude, longitude,
    and elevation of individual records.  Raw values sometimes contain
    the datum twice (e.g. '000248532 000248532', or '29.583 29.58333333'
    when decimal precision changed mid-year).  If the two space-separated
    halves agree -- exactly, or after truncating decimals to whole
    numbers -- the first half is returned; otherwise the value is
    returned untouched.

    ValueErrors caused by empty strings that slipped through earlier
    cleaning are swallowed (the value is returned); any other ValueError
    is re-raised.

    Args:
        value (str): value to check and clean if needed

    Returns:
        str
    """
    try:
        parts = value.split(' ')
        if len(parts) > 1:
            first, second = parts[0], parts[1]
            if '.' in first:
                # Decimal values: compare as truncated whole numbers.
                if int(float(first)) == int(float(second)):
                    return first
            elif first == second:
                return first
        return value
    except ValueError as e:
        if "could not convert string to float: ''" not in str(e):
            raise ValueError(e)
        return value
def num_to_activation(num):
    """Map an activation-function index to its name.

    :param num: activation function index (0-3)
    :return: activation function string
    """
    return {0: 'LeakyReLU', 1: 'relu', 2: 'tanh', 3: 'sigmoid'}[num]
def bitstring2expr(bitstrings, variable_list):
    """Convert a list of bitstrings to a Boolean sum-of-products expression.

    Each bitstring is read right to left, with position i selecting
    variable_list[i]: '1' includes the variable, '0' its negation
    ('~'), '-' (don't care) skips it.  Literals are joined with ' & ',
    terms with ' | '.
    """
    terms = []
    for bits in bitstrings:
        literals = []
        for position, bit in enumerate(reversed(bits)):
            if bit == '-':
                continue
            name = variable_list[position]
            literals.append(name if bit == '1' else '~' + name)
        terms.append(' & '.join(literals))
    return ' | '.join(terms)
def changeColor(r, g, b, dataset, oldColors):
    """Callback to set new color values for the selected dataset.

    Positional arguments:
    r -- Red value.
    g -- Green value.
    b -- Blue value.
    dataset -- Currently selected dataset.
    oldColors -- Previous colors, returned unchanged if any channel is None.

    The mapping is mutated in place (as before) and returned.
    Fixed idiom: ``r == None`` comparisons replaced with ``is None``
    identity tests, the correct way to check for None.
    """
    if r is None or g is None or b is None:
        return oldColors
    colorString = 'rgb(' + str(r) + ', ' + str(g) + ', ' + str(b) + ')'
    oldColors.update({dataset: colorString})
    return oldColors
def _succ(p,l): """ retrieve the successor of p in list l """ pos = l.index(p) if pos+1 >= len(l): return l[0] else: return l[pos+1]
def color_str(color, raw_str):
    """Format a string with a bold ANSI color.

    :param color: a color name, can be r, g, b or y (others -> white)
    :param raw_str: the string to be formatted
    :returns: a colorful string
    """
    fore_codes = {'r': 31, 'g': 32, 'b': 36, 'y': 33}
    fore = fore_codes.get(color, 37)
    prefix = "\x1B[%d;%dm" % (1, fore)
    return "%s%s\x1B[0m" % (prefix, raw_str)
def sliced_by_n(images, n=500):
    """Partition *images* into consecutive chunks of at most *n* elements."""
    chunks = []
    for start in range(0, len(images), n):
        chunks.append(images[start:start + n])
    return chunks
def _compose_export_url( fhir_url: str, export_scope: str = "", since: str = "", resource_type: str = "", container: str = "", ) -> str: """Generate a query string for the export request. Details in the FHIR spec: https://hl7.org/fhir/uv/bulkdata/export/index.html#query-parameters""" export_url = fhir_url if export_scope == "Patient" or export_scope.startswith("Group/"): export_url += f"/{export_scope}/$export" elif export_scope == "": export_url += "/$export" else: raise ValueError("Invalid scope {scope}. Expected 'Patient' or 'Group/[ID]'.") # Start with ? url argument separator, and change it to & after the first parameter # is appended to the URL separator = "?" if since: export_url += f"{separator}_since={since}" separator = "&" if resource_type: export_url += f"{separator}_type={resource_type}" separator = "&" if container: export_url += f"{separator}_container={container}" separator = "&" return export_url
def HFSS3DLayout_AdaptiveFrequencyData(freq):
    """Update HFSS 3D Layout adaptive frequency data.

    Parameters
    ----------
    freq : float
        Adaptive frequency value.

    Returns
    -------
    list
        List of frequency data tuples.
    """
    return [
        ("AdaptiveFrequency", freq),
        ("MaxDelta", "0.02"),
        ("MaxPasses", 10),
        ("Expressions", [], None),
    ]
def filterYif(item):
    """Filter an ASSET CONTROL csv row for items belonging to Yifei Li.

    Matches either 'Yifei' in the Custodian column or location DION-320.
    """
    if 'Yifei' in item['Custodian']:
        return True
    return item['Location'] == 'DION-320'
def get_colab_github_url(relative_path: str, repository: str, branch: str) -> str:
    """URL a GitHub-hosted file gets when opened on Google Colab."""
    base = "https://colab.research.google.com/github"
    return f"{base}/{repository}/blob/{branch}/{relative_path}"
def get_access(ioctl_code):
    """Decode the access field (bits 14-15) of a 32-bit IOCTL code.

    Returns (access_name, access_value).
    """
    access_names = (
        'FILE_ANY_ACCESS',
        'FILE_READ_ACCESS',
        'FILE_WRITE_ACCESS',
        'FILE_READ_ACCESS | FILE_WRITE_ACCESS',
    )
    access = (ioctl_code >> 14) & 0b11
    return access_names[access], access
def tagNameString(name):
    """Return '' if *name* is None, otherwise '("name")' (parens + quotes)."""
    if name is None:
        return ""
    return '("{}")'.format(name)
def _bcd2bin(value): """Convert binary coded decimal to Binary :param value: the BCD value to convert to binary (required, no default) """ return value - 6 * (value >> 4)
def magic_index(a):
    """True if some index i satisfies a[i] == i (the obvious linear scan)."""
    for index, element in enumerate(a):
        if index == element:
            return True
    return False
def quad2list2_receipt_v1(quad):
    """Convert a quad dict {x1..x4, y1..y4} to a list of [x, y] corner pairs."""
    return [[quad[f"x{i}"], quad[f"y{i}"]] for i in range(1, 5)]
def get_reference_data(p): """Summarise the bibliographic data of an article from an ADS query Returns dict of 'author' (list of strings), 'title' (string), and 'ref' (string giving journal, first page, and year). """ data = {} try: data['author'] = p.author except: data['author'] = 'Anon' try: data['title'] = p.title except: data['title'] = 'Untitled' try: refstring = p.pub except: refstring = 'Unknown' try: refstring += f' {p.volume}, {p.page[0]}' except: pass try: refstring += f' ({p.year})' except: pass data['ref'] = refstring return data
def mean_labels(input_dict):
    """Calculate the macro-F1 score over labels "3", "4" and "5".

    Args:
        input_dict (dict): classification report dictionary

    Returns:
        (float): macro-F1 score
    """
    labels = ("3", "4", "5")
    scores = [float(input_dict[label]["f1-score"]) for label in labels]
    return sum(scores) / len(scores)
def comp_list_of_dicts(list1, list2):
    """Compare two lists of dictionaries as unordered collections.

    Prints the first offending item of either side and returns False when
    the lists do not contain the same items; True otherwise.

    :param list1: First list of dictionaries to compare
    :type list1: list of dictionaries
    :param list2: Second list of dictionaries to compare
    :type list2: list of dictionaries
    :rtype: boolean
    """
    checks = (
        (list1, list2, "List1 item not in list2:"),
        (list2, list1, "List2 item not in list1:"),
    )
    for items, pool, message in checks:
        for entry in items:
            if entry not in pool:
                print(message)
                print(entry)
                return False
    return True
def _to_var_name(s): """ Remove,hyphens,slashes,whitespace in string so that it can be used as an OrientDB variable name. """ r = s.replace("'",'prime') table = str.maketrans(dict.fromkeys('.,!?_ -/<>{}[]()+-=*&^%$#@!`~.\|;:"')) chars_to_remove = ['.', '!', '?', '_', '-', '/', '>', '<', '(', ')', '+', '-', '*', ',', '?', ':', ';', '"', '[', ']', '{', '}', '=', '^', '%', '$', '#', '@', '!', '`', '~'] r = r.translate(table) if len(r) and r[0].isdigit(): r = 'a'+r return r
def dl_ia_utils_memory_usage(df):
    """Calculate and print the memory usage (GB) and shape of the dataframe.

    :param df: dataframe to report on
    :return: 0 on success, 1 when reporting raised an exception
    """
    try:
        gigabytes = df.memory_usage(deep=True).sum() / 1000000000
        print('{} Data Frame Memory usage: {:2.2f} GB'.format('-' * 20, gigabytes))
        print('{} Data Frame Shape: {} '.format('-' * 20, df.shape))
    except Exception as exception_msg:
        print('(!) Error in dl_ia_utils_memory_usage: ' + str(exception_msg))
        return 1
    return 0
def _merge(dict1, dict2): """Merge two dicts, dict2 takes precedence.""" return {**dict1, **dict2}
def find_max_str(smiles: str) -> str:
    """Pick the longest fragment of a '.'-separated multi-SMILES string."""
    fragments = smiles.split(".")
    return max(fragments, key=len)
def create_agent_params(dim_state, actions, ep_returns, ep_losses, mean, std,
                        layer_sizes, discount_rate, learning_rate, batch_size,
                        memory_cap, update_step, decay_period, init_eps,
                        final_eps):
    """Bundle the agent hyper-parameters and episode buffers into one dict."""
    return {
        "dim_state": dim_state,
        "actions": actions,
        "ep_returns": ep_returns,
        "ep_losses": ep_losses,
        "mean": mean,
        "std": std,
        "layer_sizes": layer_sizes,
        "discount_rate": discount_rate,
        "learning_rate": learning_rate,
        "batch_size": batch_size,
        "memory_cap": memory_cap,
        "update_step": update_step,
        "decay_period": decay_period,
        "init_eps": init_eps,
        "final_eps": final_eps,
    }
def in_box(coords, box):
    """Return True if *coords* lies strictly inside *box*.

    x must satisfy box[0][0] < x < box[1][0] and y must satisfy
    box[1][1] < y < box[0][1] (i.e. box is given as two opposite
    corners with y decreasing from box[0] to box[1]).
    """
    x_ok = box[0][0] < coords[0] < box[1][0]
    if not x_ok:
        return False
    return True if box[1][1] < coords[1] < box[0][1] else False
def transpose(pcl_file, outfile):
    """Build a doit task that transposes the merged pcl and metadata file.

    :param pcl_file: String; file that is the merge of the otu and metadata
    :param outfile: String; file that is the transpose of the pcl_file

    External dependencies
    - Maaslin: https://bitbucket.org/biobakery/maaslin
    """
    cmd = "transpose.py < " + pcl_file + " > " + outfile
    task = {
        "name": "transpose: " + pcl_file,
        "actions": [cmd],
        "file_dep": [pcl_file],
        "targets": [outfile],
    }
    return task
def group_by(l, column, comp_margin=None):
    """
    Groups list entities by column values.

    :param l: list of mapping-like rows.
    :param column: key whose value determines the grouping.
    :param comp_margin: optional prefix length for the comparison; None
        compares the full value (slice [0:None] is the whole value).
    :return: list of groups, each a list of rows sharing the (possibly
        truncated) column value, in first-appearance order.

    NOTE(review): the nested scan makes this O(n^2) in len(l).
    """
    sorted_values = []
    result_list = []
    current_value = None
    for i in range(0, len(l)):
        x = l[i]
        # Skip rows whose truncated value was already grouped, or that
        # belong to the value currently being collected.
        if x[column][0:comp_margin] in sorted_values or x[column][0:comp_margin] == current_value:
            continue
        current_value = x[column][0:comp_margin]
        current_list = []
        # Collect every later row sharing this truncated value.
        for j in range(i, len(l)):
            y = l[j]
            if y[column][0:comp_margin] != current_value:
                continue
            current_list.append(y)
        sorted_values.append(current_value)
        result_list.append(current_list)
    return result_list
def str_to_bool(text):
    """Parse a boolean value from the given text.

    True for the case-insensitive strings 'true', 'y', 'yes' and '1';
    False for everything else, including None and the empty string.

    Bug fixed: the original ``return text and ...`` leaked the falsy
    input itself ('' or None) instead of returning an actual bool.
    """
    return bool(text) and text.lower() in ['true', 'y', 'yes', '1']
def to_set(labels_list):
    """Flatten a list of annotation label lists into the set of unique labels.

    Parameters
    ----------
    labels_list : list of lists, i.e. labels from annotations

    Returns
    -------
    labelset : set

    Examples
    --------
    >>> labels_list = [voc.annot.labels for voc in vds.voc_list]
    >>> labelset = to_set(labels_list)
    """
    labelset = set()
    for labels in labels_list:
        labelset.update(labels)
    return labelset
def char_lookup(c: str, table: dict, default=None):
    """Translate a char into a value by looking it up in a dict.

    Args:
        c: char to translate (lookup is case-sensitive)
        table: dict mapping chars to values
        default: if not None, the value returned when `c` is absent

    Returns:
        The value mapped to `c`, or `default` when `c` is missing and a
        non-None default was supplied.

    Raises:
        KeyError: when `c` is absent and no default was given.
    """
    try:
        return table[c]
    except KeyError:
        if default is not None:
            return default
        raise KeyError('Key `{}` not found'.format(c))
def filename(select: str) -> str:
    """Return the filename portion (text before the first colon) of a
    colon-separated selection; the whole string when no colon is present."""
    name, _, _ = select.partition(":")
    return name
def to_nested_dict(d, delim='.', copy=True):
    """Expand delimiter-joined keys into nested dicts.

    Each key containing `delim` is removed and its value re-inserted under a
    chain of nested dicts, e.g. {'a.b.c': 0} -> {'a': {'b': {'c': 0}}}.
    Keys without the delimiter are left untouched.

    :param d: flat mapping whose keys may embed the delimiter
    :param delim: separator marking nesting levels (default '.')
    :param copy: when True operate on a shallow copy; when False mutate and
        return `d` itself
    :returns: the (possibly new) nested dict
    """
    nested = dict(d) if copy else d
    # iterate a snapshot of the original keys, since entries get popped
    for key in list(d):
        if delim not in key:
            continue
        value = nested.pop(key)
        *branches, leaf = key.split(delim)
        node = nested
        for branch in branches:
            # descend, creating intermediate dicts on demand
            node = node.setdefault(branch, dict())
        node[leaf] = value
    return nested
def token_at_cursor( code, pos=0 ): """ Find the token present at the passed position in the code buffer """ l = len(code) end = start = pos # Go forwards while we get alphanumeric chars while end<l and code[end].isalpha(): end+=1 # Go backwards while we get alphanumeric chars while start>0 and code[start-1].isalpha(): start-=1 # If previous character is a %, add it (potential magic) if start>0 and code[start-1] == '%': start -= 1 return code[start:end], start
def validate_stdout(stdout):
    """Return True if `stdout` does not contain the hermit panic marker,
    i.e. the test did not fail.

    :param stdout: captured standard output of the test run
    """
    # Todo: support should_panic tests (Implementation on hermit side with custom panic handler)
    return "!!!PANIC!!!" not in stdout
def get_yes_no(value):
    """Coerce any value to the string 'yes' (truthy) or 'no' (falsy)."""
    if value:
        return 'yes'
    return 'no'
def _repeatlast(numfields,listin): """repeat last item in listin, until len(listin) = numfields""" if len(listin) < numfields: last = listin[-1] for n in range(len(listin),numfields): listin.append(last) return listin
def format_bucket(bucket):
    """Format the bucket number as a zero-padded, six-digit string.

    :params ``int`` bucket: Bucket number
    :returns: ``str`` - Formatted (0 padded) bucket number.
    """
    return format(bucket, '06d')
def rgb_to_brg(rgb_colour):
    """Reorder an (R, G, B) colour into the (B, R, G) order used by DAWN's
    LED strip."""
    red = rgb_colour[0]
    green = rgb_colour[1]
    blue = rgb_colour[2]
    return (blue, red, green)
def create_session_string(prefix, fmat, orb, rootsift, ratio, session):
    """Create an identifier string from the most common parameter options.

    Keyword arguments:
    prefix -- custom string appended at the beginning of the session string
    fmat -- bool indicating whether fundamental matrices or essential matrices are estimated
    orb -- bool indicating whether ORB features or SIFT features are used
    rootsift -- bool indicating whether RootSIFT normalization is used
    ratio -- threshold for Lowe's ratio filter
    session -- custom string appended at the end of the session string
    """
    parts = [prefix, 'F' if fmat else 'E']
    if orb:
        parts.append('orb')
    if rootsift:
        parts.append('rs')
    parts.append('r%.2f' % ratio)
    parts.append(session)
    return '_'.join(parts)
def init(i):
    """Module initialization hook — not to be called directly.

    Sets the path to the vqe_plugin.

    :param i: framework input dict (unused here)
    :returns: dict with 'return' 0 signalling success
    """
    result = {'return': 0}
    return result
def stringify_klasses_data(klasses_by_module):
    """Replace module and class objects with their string names.

    :param klasses_by_module: mapping of module object -> list of classes
    :returns: dict of module name -> list of class names
    """
    return {
        module.__name__: [klass.__name__ for klass in klasses]
        for module, klasses in klasses_by_module.items()
    }
def parse_tle_float(s):
    """Parse a TLE-style float field with implicit decimal point and
    implicit exponent marker (sign + 5 mantissa digits + 2-char exponent).

    >>> parse_tle_float(' 12345-3')
    0.00012345
    >>> parse_tle_float('+12345-3')
    0.00012345
    >>> parse_tle_float('-12345-3')
    -0.00012345
    """
    sign, mantissa, exponent = s[0], s[1:6], s[6:8]
    return float('{}.{}e{}'.format(sign, mantissa, exponent))
def structure(object, hashable=True):
    """Return an object describing the hierarchical structure of the given
    object (eliminating the values). Structures can then be compared via
    equality testing. This can be used to more quickly compare two
    structures than structureMatch, particularly when hashable=True.

    If hashable is True, this returns a hashable representation; otherwise
    a more human-readable (list/dict) representation.

    Fix: the recursive calls previously ignored `hashable`, so with
    hashable=False every level below the top one still came back as tuples;
    the flag is now propagated through the recursion.
    """
    if isinstance(object, (list, tuple)):
        res = [structure(v, hashable) for v in object]
        if all(v is None for v in res):
            # a sequence of plain leaves collapses to its length
            return len(res)
        if hashable:
            return tuple(res)
        return res
    elif isinstance(object, dict):
        res = {k: structure(v, hashable) for k, v in object.items()}
        if hashable:
            return tuple(res.items())
        return res
    else:
        # leaf value: only its presence matters, not its content
        return None