def compare_languge(language, lang_list):
    """ Check if language is found """
    found = False
    for l in lang_list:
        if language == l.lower():
            found = True
            break
    return found
def find_missing(input_list):
    """ Find the missing number in shuffled list """
    # Mathematical formula for finding the sum of consecutive natural numbers 0..N:
    # (N*(N+1))/2  (integer division keeps the result an int)
    total = (len(input_list) * (len(input_list) + 1)) // 2
    summed = 0
    for element in input_list:
        summed += element
    missing = total - summed
    return missing
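# A small hedged sanity check for find_missing (values are illustrative, not from the source):
# with 0..5 present except 3, the sum formula recovers the missing value.
assert find_missing([0, 1, 2, 4, 5]) == 3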
def expanded_bb(final_points):
    """computation of coordinates and distance"""
    left, right = final_points
    left_x, left_y = left
    right_x, right_y = right
    base_center_x = (left_x + right_x) / 2
    base_center_y = (left_y + right_y) / 2
    dist_base = abs(complex(left_x, left_y) - complex(right_x, right_y))
    return (int(base_center_x), int(base_center_y)), dist_base
def dategetter(date_property, collection):
    """
    Attempts to obtain a date value from a collection.

    :param date_property: property representing the date
    :param collection: dictionary to check within

    :returns: `str` (ISO8601) representing the date
              ('..' if null or "now", allowing for an open interval).
    """
    value = collection.get(date_property, None)
    if value == 'now' or value is None:
        return '..'
    return value.isoformat()
def get_col(lut, idx, range):
    """ Get RGB color for an index within a range, using a lookup table """
    lower, upper = range[0], range[1]
    if lower == upper:
        pos = 0
    else:
        pos = (idx - lower) / (upper - lower)
    return lut[int(pos * (len(lut) - 1))]
def conv_s2hms(seconds, short=False):
    """
    Converts seconds to hours, minutes and seconds.

    :param seconds: The time in seconds to use
    :type seconds: int
    :return: str
    """
    seconds = int(seconds)
    # integer division so the remainders below stay correct in Python 3
    hours = seconds // 3600
    seconds -= 3600 * hours
    minutes = seconds // 60
    seconds -= 60 * minutes
    if hours == 0 and short is True:
        return "%02d:%02d" % (minutes, seconds)
    return "%02d:%02d:%02d" % (hours, minutes, seconds)
def is_proper_component_name(component):
    """Check if the component name is properly formatted."""
    return "@" not in component and "/" not in component
def line_intersection(line1, line2):
    """! Returns the intersection coordinate between two lines

    @param line1 `tuple` line 1 to calculate intersection coordinate
    @param line2 `tuple` line 2 to calculate intersection coordinate

    @return `tuple` intersection coordinate between line 1 and line 2
    """
    xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
    ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])

    def det(a, b):
        return a[0] * b[1] - a[1] * b[0]

    div = det(xdiff, ydiff)
    if div == 0:
        raise Exception("lines do not intersect")
    d = (det(*line1), det(*line2))
    x = det(d, xdiff) / div
    y = det(d, ydiff) / div
    return int(round(x)), int(round(y))
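# Illustrative usage of line_intersection, assuming each line is a pair of (x, y) points
# (values chosen for illustration): the diagonals of a 2x2 square cross at (1, 1).
assert line_intersection(((0, 0), (2, 2)), ((0, 2), (2, 0))) == (1, 1)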
def areSeqCluParameters(parameters: list) -> bool:
    """
    Verify whether a list of parameters has the format of parameters used to
    configure the 'SeqClu-PV' algorithm.

    :param parameters: The list of parameters to check.
    :return: A boolean value indicating whether the given list has the expected format.
    """
    if len(parameters) != 5:
        return False
    return (type(parameters[0]) == int and type(parameters[1]) == float
            and type(parameters[2]) == float and type(parameters[3]) == bool
            and type(parameters[4]) == bool)
def make_cookie_values(cj, class_name):
    """
    Makes a string of cookie keys and values.
    Can be used to set a Cookie header.
    """
    path = "/" + class_name
    cookies = [c.name + '=' + c.value
               for c in cj
               if c.domain == "class.coursera.org" and c.path == path]
    return '; '.join(cookies)
def extract_friends(json_handle: dict):
    """
    Extracting list of friends.
    :param json_handle: data handle
    :return: List with friends IDs
    """
    friend_ids = []
    for item in json_handle.keys():
        friend_ids.append(int(item))
    return sorted(friend_ids)
def make_add_rich_menu(name, size, areas):
    """
    add rich menu

    content: reference - `Common Message Property
    <https://developers.worksmobile.com/jp/document/1005040?lang=en>`_

    You can create a rich menu for the message bot by following these steps:

    1. Image uploads: using the "Upload Content" API
    2. Rich menu generation: using the "Register Message Rich Menu" API
    3. Rich Menu Image Settings: Use the "Message Rich Menu Image Settings" API
    """
    return {"name": name, "size": size, "areas": areas}
def get_thumbnail_size(image_size, thumbnail_height):
    """
    Computes the size of a thumbnail

    :param image_size: original image size
    :param thumbnail_height: thumbnail height
    :return: thumbnail size tuple
    """
    width = round((float(thumbnail_height) / image_size[1]) * image_size[0])
    return width, thumbnail_height
def get_edit_distance(s1, s2):
    """Calculate the Levenshtein distance between two normalised strings

    Adopted from example in
    https://stackoverflow.com/questions/2460177/edit-distance-in-python
    See https://en.wikipedia.org/wiki/Edit_distance

    @param s1: the first string to compare
    @param s2: the second string to compare
    @returns: an integer giving the edit distance
    """
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    distances = range(len(s1) + 1)
    for i2, c2 in enumerate(s2):
        distances_ = [i2 + 1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                distances_.append(distances[i1])
            else:
                distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
        distances = distances_
    return distances[-1]
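# Quick sanity check for get_edit_distance (illustrative strings only):
# "kitten" -> "sitting" requires three single-character edits.
assert get_edit_distance("kitten", "sitting") == 3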
def memoize(cached_function):
    """ Memoization decorator for functions taking one or more arguments. """
    # http://code.activestate.com/recipes/578231-probably-the-fastest-memoization-decorator-in-the-/
    class MemoDict(dict):
        def __init__(self, cached_function):
            super().__init__()
            self.cached_function = cached_function

        def __call__(self, *args):
            return self[args]

        def __missing__(self, key):
            ret = self[key] = self.cached_function(*key)
            return ret

    return MemoDict(cached_function)
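# Minimal usage sketch for memoize; slow_square is a hypothetical example function, not from the source.
@memoize
def slow_square(n):
    # pretend this is expensive; repeated calls with the same argument hit the cache
    return n * n

assert slow_square(12) == 144
assert slow_square(12) == 144  # second call served from the MemoDict cache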
def order_mirrored_vertex_points(vertices, plane):
    """
    Order mirrored vertex points to keep consistent global system

    Args:
        :vertices: (tuple) vertices like (a, b, c, d)
        :plane: (str) plane ('xy', 'xz' or 'yz')

    Returns:
        :ordered_vertices: (tuple) ordered vertices
    """
    a, b, c, d = vertices
    if plane == 'xy' or plane == 1:
        pass
    elif plane == 'xz' or plane == 2:
        a, b, c, d = b, a, d, c
    elif plane == 'yz' or plane == 3:
        a, b, c, d = d, c, b, a
    else:
        raise ValueError(f"Invalid plane (plane: '{plane}')")
    return (a, b, c, d)
def init_parameters(parameter):
    """Auxiliary function to set the parameter dictionary

    Parameters
    ----------
    parameter: dict
        See the above function inverseSTFT for further information

    Returns
    -------
    parameter: dict
    """
    parameter['costFunc'] = 'KLDiv' if 'costFunc' not in parameter else parameter['costFunc']
    parameter['numIter'] = 30 if 'numIter' not in parameter else parameter['numIter']
    parameter['fixW'] = False if 'fixW' not in parameter else parameter['fixW']
    return parameter
def _get_requirements_to_disable(old_requirements, new_requirements):
    """
    Get the ids of 'CreditRequirement' entries to be disabled that
    are deleted from the courseware.

    Args:
        old_requirements(QuerySet): QuerySet of CreditRequirement
        new_requirements(list): List of requirements being added

    Returns:
        List of ids of CreditRequirement that are not in new_requirements
    """
    requirements_to_disable = []
    for old_req in old_requirements:
        found_flag = False
        for req in new_requirements:
            # check if an already added requirement is modified
            if req["namespace"] == old_req.namespace and req["name"] == old_req.name:
                found_flag = True
                break
        if not found_flag:
            requirements_to_disable.append(old_req.id)
    return requirements_to_disable
def is_folder(obj):
    """
    Checks if object is a vim.Folder.

    :param obj: The object to check
    :return: If the object is a folder
    :rtype: bool
    """
    return hasattr(obj, "childEntity")
def mock_tqdm(*args, **kwargs):
    """Stand-in for tqdm used in tests: returns the wrapped iterable unchanged."""
    if args:
        return args[0]
    return kwargs.get('iterable', None)
def pair_issue(issue_list1, issue_list2):
    """
    Associates pairs of issues originating from original and canonical repositories.

    :param issue_list1: list of IssueDir
    :type issue_list1: array
    :param issue_list2: list of IssueDir
    :type issue_list2: array
    :return: list containing tuples of issue pairs [(issue1, issue2), (...)]
    :rtype: list
    """
    dict1 = {}
    pairs = []
    for i in issue_list1:
        s_i = "-".join([i[0], str(i[1]), i[2]])
        dict1[s_i] = i
    for j in issue_list2:
        s_j = "-".join([j[0], str(j[1]), j[2]])
        if s_j in dict1:
            pairs.append((dict1[s_j], j))
    return pairs
def from_string(unicode_string):
    """
    Converts the given String to a byte array.

    :param unicode_string: The String to be converted to a byte array.
    :return: A byte array representing the String.
    """
    result = None
    if unicode_string is not None:
        result = bytearray(unicode_string.encode("utf-8"))
    return result
def to_lower(x):
    """Lower case."""
    return x.lower()
def left_shift(data):
    """
    >>> left_shift("0123456789")
    '1234567890'
    """
    return data[1:] + data[0]
def markup(text, annotations, positive_class=True):
    """Given a text and a list of AnnotatedSpan objects, inserts HTML <span> tags
    around the annotated areas."""
    last_char = 0
    doc_markedup = []
    for start, end, word, weight, level in annotations:
        doc_markedup.append(text[last_char:start])
        doc_markedup.append('<span class="%s-%d" title="%s (%0.3f)">%s</span>'
                            % ('pos' if (weight > 0) == positive_class else 'neg',
                               level, word, weight, text[start:end]))
        last_char = end
    doc_markedup.append(text[last_char:])
    return ''.join(doc_markedup)
def check(header, key, value):
    """
    If *value* is not `None`, check that it matches the stored `header[key]`
    and return the parsed *value*; otherwise return the stored value.
    """
    if value is None:
        return header[key]
    assert header[key] == value
    return value
def es_letra_atril(event):
    """Checks that the most recent event is a click on the user's letter rack."""
    return isinstance(event, int)
def complete_sequence(seq):
    """
    Seq is normalized by capitalizing all of the characters.

    :param seq: sequence to test
    :return: True if sequence only contains A, C, T, or G,
             False if contains N or any other character
    """
    allowed = set('ACTG')
    return set(seq.upper()).issubset(allowed)
def format_percent(num: float, force_sign: bool = False) -> str:
    """Formats a decimal ratio as a percentage."""
    prefix = '+' if force_sign and num > 0 else ''
    return '{}{:,.1f}%'.format(prefix, num * 100)
def noop_warmup(agent, env, history, args):
    """Warm up with noop."""
    return agent, env, history, args
def CreateSizesExternalDiagnostic(sizes_guid):
    """Creates a histogram external sizes diagnostic."""
    benchmark_diagnostic = {
        'type': 'GenericSet',
        'guid': str(sizes_guid),
        'values': ['sizes'],
    }
    return benchmark_diagnostic
def prefixe(arbre):
    """Function that returns the pre-order traversal of the tree as a list."""
    liste = []
    if arbre is not None:
        liste.append(arbre.get_val())
        liste += prefixe(arbre.get_ag())
        liste += prefixe(arbre.get_ad())
    return liste
def _isredirect(values):
    """Check which rewrite values are redirects (one boolean per value)."""
    return [v.split().pop() in ('redirect', 'permanent') for v in values]
def get_obj_path_name(object):
    """
    Get the full correct name of the provided object.

    :param object: UObject
    :return: String of the Path Name
    """
    if object:
        return object.PathName(object)
    else:
        return "None"
def find_history_replacements_active_at(objects, time):
    """Return dictionary mapping object pk to object or its history object at the time, if any.

    Same caveats as for find_history_active_at applies."""
    if not objects:
        return {}

    # automatically figure out how to query history model
    history_model = objects[0].history_set.model
    # core_filters contains something like "group__exact": obj
    relation_name = next(iter(objects[0].history_set.core_filters)).replace("__exact", "")

    # now the querying is a bit tricky - we are only interested in the
    # history version just before time, or if we can't get that, the
    # one just after, but lacking a good way of expressing that
    # through SQL we just grab all of them and sort it out ourselves
    changed_objects = [o for o in objects if o.time > time]
    histories = history_model.objects.filter(**{
        relation_name + "__in": changed_objects
    }).order_by(relation_name, "-time")

    history_for_obj = {o.pk: o for o in objects}
    skip = None
    for h in histories:
        obj_id = getattr(h, relation_name + "_id")
        if obj_id == skip:
            continue
        history_for_obj[obj_id] = h
        if h.time <= time:
            skip = obj_id  # we're far enough, go to next obj

    return history_for_obj
def hus_to_h2o(hus):
    """Calculate H2O in vmr instead of specific humidity."""
    mda = 28.966  # molecular mass of dry air
    mwv = 18.016  # molecular mass of water vapour
    h2o = mda / mwv * hus / (1.0 - hus)
    return h2o
def _unicode_char(char):  # pragma: no cover
    """Return true if character is Unicode (non-ASCII) character."""
    try:
        char.encode("ascii")
    except UnicodeEncodeError:
        return True
    return False
def c_any(iterable):
    """ Implements python 2.5's any() """
    for element in iterable:
        if element:
            return True
    return False
def reposition_bounding_box(bbox, tile_location):
    """Relocates bbox to the relative location to the original image.

    Args:
        bbox (int, int, int, int): bounding box relative to tile_location
            as xmin, ymin, xmax, ymax.
        tile_location (int, int, int, int): tile_location in the original
            image as xmin, ymin, xmax, ymax.

    Returns:
        A list of points representing the location of the bounding box
        relative to the original image as xmin, ymin, xmax, ymax.
    """
    bbox[0] = bbox[0] + tile_location[0]
    bbox[1] = bbox[1] + tile_location[1]
    bbox[2] = bbox[2] + tile_location[0]
    bbox[3] = bbox[3] + tile_location[1]
    return bbox
def get_wind_direction(degrees):
    """
    :param degrees: integer for degrees of wind
    :return: string of the wind direction in shorthand form
    """
    try:
        degrees = int(degrees)
    except ValueError:
        return ''

    if degrees < 23 or degrees >= 338:
        return 'N'
    elif degrees < 68:
        return 'NE'
    elif degrees < 113:
        return 'E'
    elif degrees < 158:
        return 'SE'
    elif degrees < 203:
        return 'S'
    elif degrees < 248:
        return 'SW'
    elif degrees < 293:
        return 'W'
    elif degrees < 338:
        return 'NW'
def add_parameter_group_to_list_of_dicts(Dlist, names, values):
    """
    Copies and returns the given list of dictionaries but with
    names[0]=values[0] and names[1]=values[1] etc added to each.
    """
    assert len(names) == len(values)
    N = len(names)
    new_Dlist = []
    for D in Dlist:
        newD = D.copy()
        for i in range(N):
            newD[names[i]] = values[i]
        new_Dlist.append(newD)
    return new_Dlist
def addKey(s1, s2):
    """Add two keys in GF(2^4)"""
    return [i ^ j for i, j in zip(s1, s2)]
def scalar_eq(a, b, precision=0):
    """Check if two scalars are equal.

    Keyword arguments:
    a -- first scalar
    b -- second scalar
    precision -- precision to check equality

    Returns:
    True if scalars are equal
    """
    return abs(a - b) <= precision
def get_trf_command(command, transformation=""):
    """
    Return the last command in the full payload command string.
    Note: this function returns the last command in job.command which is only set for containers.

    :param command: full payload command (string).
    :param transformation: optional name of transformation, e.g. Sim_tf.py (string).
    :return: trf command (string).
    """
    payload_command = ""
    if command:
        if not transformation:
            payload_command = command.split(';')[-2]
        else:
            if transformation in command:
                payload_command = command[command.find(transformation):]

        # clean-up the command, remove '-signs and any trailing ;
        payload_command = payload_command.strip()
        payload_command = payload_command.replace("'", "")
        payload_command = payload_command.rstrip(";")

    return payload_command
def split_by_unescaped_sep(text, sep=':'):
    """Split string at sep but only if not escaped."""
    def remerge(s):
        # s is a list of strings.
        for i in range(len(s) - 1):
            n_esc = len(s[i]) - len(s[i].rstrip('\\'))
            if n_esc % 2 == 0:
                continue
            else:
                new_s = s[:i] + [s[i] + sep + s[i + 1]] + s[i + 2:]
                return remerge(new_s)
        return s

    # split by every sep (even unescaped ones)
    # then re-merge strings that end in an uneven number of escape chars:
    return remerge(text.split(sep))
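# Illustrative call for split_by_unescaped_sep (toy input, not from the source):
# the escaped colon in 'a\:b' is kept, while the bare colon before 'c' splits.
assert split_by_unescaped_sep('a\\:b:c') == ['a\\:b', 'c']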
def format_itemize(informations, comment_symbol, tag):
    """
    Write a latex itemize for all data in informations and return it as a list,
    need comment_symbol.

    informations : set of tuple containing (name, desc) of a func
    comment_symbol : string defining a comment line
    tag : tag of the main function to include between
    """
    lines = list()
    lines.append(comment_symbol + " " + tag + "\n")
    lines.append(comment_symbol + " \\itemize{\n")
    for name, desc in informations:
        raw = comment_symbol + "\t\\item " + name + ": " + desc + "\n"
        lines.append(raw)
    lines.append(comment_symbol + " }\n")
    lines.append(comment_symbol + " " + tag + "\n")
    return lines
def strip_optional_prefix(string, prefix, log=None):
    """
    >>> strip_optional_prefix('abcdef', 'abc')
    'def'
    >>> strip_optional_prefix('abcdef', '123')
    'abcdef'
    >>> strip_optional_prefix('abcdef', '123', PrintingLogger())
    String starts with 'abc', not '123'
    'abcdef'
    """
    if string.startswith(prefix):
        return string[len(prefix):]
    if log:
        log.warn('String starts with %r, not %r', string[:len(prefix)], prefix)
    return string
def help_template(template=None):
    """ Gets or sets the current HelpTemplate in use. """
    global _help_template

    if template is not None:
        _help_template = template

    return _help_template
def CCDSELExtracter(CCDSEL):
    """Extract the individual CCDSEL (64, 32, 16, 8, 4, 2, 1) arguments from a
    number CCDSEL argument, assuming the CCDSEL argument is valid.

    Argument:
        CCDSEL (int): Value between 0 and 127 which represent the selection of a number of CCDs.

    Returns:
        (list of int): List containing individual CCDSEL arguments.
    """
    CCDSELs = [64, 32, 16, 8, 4, 2, 1]
    IndividualCCDSEL = []
    for CCD in CCDSELs:
        if CCDSEL != 0:
            rest = CCDSEL % CCD
            if rest in CCDSELs:
                IndividualCCDSEL.append(rest)
                CCDSEL -= rest
                if CCD <= CCDSEL:
                    IndividualCCDSEL.append(CCD)
                    CCDSEL -= CCD
            else:
                # elif( rest == 0 ):
                #     IndividualCCDSEL.append(CCD)
                #     CCDSEL -= CCD
                if CCD <= CCDSEL:
                    IndividualCCDSEL.append(CCD)
                    CCDSEL -= CCD
    return IndividualCCDSEL
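# Hedged example for CCDSELExtracter, assuming the control flow reconstructed above
# (the input value is illustrative): 21 = 16 + 4 + 1, so those three bits should come back.
assert sorted(CCDSELExtracter(21)) == [1, 4, 16]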
def dict_list_get_values_for_key(dict_list, key):
    """Get values for keys."""
    return [d[key] for d in dict_list]
def _split_left_right(name):
    """Split record name at the first whitespace and return both parts.

    RHS is set to an empty string if not present.
    """
    parts = name.split(None, 1)
    lhs, rhs = [parts[0], parts[1] if len(parts) > 1 else '']
    return lhs, rhs
def paginated_list(full_list, max_results, next_token):
    """
    Returns a tuple containing a slice of the full list starting at next_token
    and ending with at most the max_results number of elements, and the new
    next_token which can be passed back in for the next segment of the full list.
    """
    sorted_list = sorted(full_list)
    list_len = len(sorted_list)

    start = sorted_list.index(next_token) if next_token else 0
    end = min(start + max_results, list_len)
    new_next = None if end == list_len else sorted_list[end]

    return sorted_list[start:end], new_next
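# Illustrative pagination walk-through for paginated_list (toy data, not from the source):
page1, token = paginated_list(['c', 'a', 'b', 'd'], 2, None)   # (['a', 'b'], 'c')
page2, token = paginated_list(['c', 'a', 'b', 'd'], 2, token)  # (['c', 'd'], None)
assert page1 == ['a', 'b'] and page2 == ['c', 'd'] and token is None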
def convert(degrees):
    """
    Convert degrees Fahrenheit to degrees Celsius: (F - 32) * 5/9.

    Args:
        (float | int value) Degrees Fahrenheit
    Returns:
        (numeric value) celsius
    """
    celsius = (degrees - 32) * 5 / 9
    return celsius
def is_none_or_empty(param, return_value):
    """
    Args:
        param: value to test
        return_value: fallback returned when param is None or empty

    Returns:
        return_value if param is None or an empty string, otherwise param.
    """
    return return_value if param is None or param == '' else param
def find_pax(pnr, paxnum):
    """Pax number is paxnum - 1 because paxes stored in list."""
    paxes = pnr["name"]
    if len(paxes) < int(paxnum):
        return None
    return paxes[int(paxnum) - 1]
def acquire_symbols_from(name, name2sym, never_str=False):
    """
    Acquire the symbol(s) from the iterable

    :param name: Name of symbol. All namespace is removed.
    :type name: ```Union[Any, str]```

    :param name2sym: Dict from symbol name to symbol
    :type name2sym: ```Dict[Str, Any]```

    :param never_str: If True, ensure that `getattr` on the module is always called
    :type never_str: ```bool```

    :return: The list of symbols acquired from the module
    :rtype: ```Callable[[...], Any]```
    """
    if isinstance(name, str):
        name = name.rpartition(".")[2] if name.count(".") > 0 else name
        if name in name2sym and never_str:
            return name2sym[name]
    elif isinstance(name, tuple) and len(name) == 2:
        name, namespace = name
        name = name.rpartition(".")[2] if name.count(".") > 0 else name
        return name2sym[name](**vars(namespace))
    if never_str and isinstance(name, str):
        raise KeyError("{!r} not found in {!r}".format(name, ""))
    return name
def classify_bnd(disrupt_dict):
    """
    Classify genic effect of a breakend.
    An interchromosomal breakpoint falling within a gene is LOF.
    """
    elements = disrupt_dict.keys()
    if 'CDS' in elements:
        return 'LOF'
    if 'transcript' in elements:
        return 'LOF'
    if 'gene' in elements:
        return 'GENE_OTHER'
    if 'UTR' in elements:
        return 'UTR'
    if 'promoter' in elements:
        return 'promoter'
    return 'no_effect'
def values_to_string(input_values):
    """Method that takes a list of values and converts them to a '|'-delimited string."""
    token_list = []
    for value in input_values:
        token_list.append(str(value))
    return '|'.join(token_list)
def triangle_area(base, height):
    """Returns the area of a triangle"""
    return (base * height) / 2
def get_data_shape(data, strict_no_data_load=False):
    """
    Helper function used to determine the shape of the given array.

    In order to determine the shape of nested tuples, lists, and sets, this function
    recursively inspects elements along the dimensions, assuming that the data has a
    regular, rectangular shape. In the case of out-of-core iterators, this means that
    the first item along each dimension would potentially be loaded into memory.
    Set strict_no_data_load=True to enforce that this does not happen, at the cost that
    we may not be able to determine the shape of the array.

    :param data: Array for which we should determine the shape.
    :type data: List, numpy.ndarray, DataChunkIterator, any object that support __len__ or .shape.
    :param strict_no_data_load: If True and data is an out-of-core iterator, None may be returned.
                                If False (default), the first element of data may be loaded into memory.
    :return: Tuple of ints indicating the size of known dimensions. Dimensions for which the size
             is unknown will be set to None.
    """
    def __get_shape_helper(local_data):
        shape = list()
        if hasattr(local_data, '__len__'):
            shape.append(len(local_data))
            if len(local_data):
                el = next(iter(local_data))
                if not isinstance(el, (str, bytes)):
                    shape.extend(__get_shape_helper(el))
        return tuple(shape)

    # NOTE: data.maxshape will fail on empty h5py.Dataset without shape or maxshape.
    # this will be fixed in h5py 3.0
    if hasattr(data, 'maxshape'):
        return data.maxshape
    if hasattr(data, 'shape'):
        return data.shape
    if isinstance(data, dict):
        return None
    if hasattr(data, '__len__') and not isinstance(data, (str, bytes)):
        if not strict_no_data_load or isinstance(data, (list, tuple, set)):
            return __get_shape_helper(data)
    return None
def _addrinfo_to_ip_strings(addrinfo):
    """
    Helper function that consumes the data output by socket.getaddrinfo and
    extracts the IP address from the sockaddr portion of the result.

    Since this is meant to be used in conjunction with _addrinfo_or_none,
    this will pass None and EndPoint instances through unaffected.
    """
    if addrinfo is None:
        return None
    return [(entry[4][0], entry[4][1]) for entry in addrinfo]
def key_mode_to_int(mode):
    """Return the mode of a key as an integer (1 for major and -1 for minor).

    Parameters
    ----------
    mode : {'major', 'minor', None, 1, -1}
        Mode of the key

    Returns
    -------
    int
        Integer representation of the mode.
    """
    if mode in ("minor", -1):
        return -1
    elif mode in ("major", None, 1):
        return 1
    else:
        raise ValueError("Unknown mode {}".format(mode))
def seconds_to_hms(seconds):
    """Convert seconds into hours, minutes and seconds.

    # Example
    ```python
    h, m, s = seconds_to_hms(5401)
    print('Hour: {} Minutes: {} Seconds: {}'.format(h, m, s))
    ```
    """
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    return h, m, s
def ppm_to_unity(ppm):
    """Converts a ppm value to its unity value equivalent

    :param ppm: ppm scale value
    :type ppm: float
    :return: the unity scale value of the input ppm value
    :rtype: float
    """
    return 1 + ppm * 1E-6
def mangle_sheet_name(s: str) -> str:
    """Return a string suitable for a sheet name in Excel/Libre Office.

    :param s: sheet name
    :return: string which should be suitable for sheet names
    """
    replacements = {
        ':': '',
        '[': '(',
        ']': ')',
        '*': '',
        '?': '',
        "'": '"',
        "\\": ""
    }
    for x, y in replacements.items():
        s = s.replace(x, y)
    return s
def countDistinctTrianglesSet(arr):
    """
    int[][] arr
    return int
    """
    uniq = set()
    for tri in arr:
        tri = list(tri)
        tri.sort()
        # key = ":".join([str(s) for s in tri])
        key = tuple(tri)
        uniq.add(key)
    print(uniq)
    count = len(uniq)
    return count
def opml_attr(str_name, str_value):
    """Build OPML attribute pairings"""
    # return '%s=%s ' % (str_name, str_value)
    return ''.join([str_name, '=', str_value, ' '])
def decode_expanded_peers(peers):
    """Return a list of IPs and ports, given an expanded list of peers,
    from a tracker response."""
    return [(p["ip"], p["port"]) for p in peers]
def check_via_group(via_group, source_sink):
    """
    Check the validity of each via set in the via group.

    :param via_group: the via_group in question.
    :return: via_group with all valid candidate(s)
    """
    # valid for 2-via cell: 1 source, 1 sink
    # valid for 3-via cell: 2 sink, 1 source
    valid_group = []
    for each_group in via_group:
        num_vias = len(each_group)
        num_source = 0
        num_sink = 0
        for each_via in each_group:
            # 0 = sink, 1 = source
            if source_sink[each_via[2]] == 1:
                num_source += 1
            elif source_sink[each_via[2]] == 0:
                num_sink += 1
        if num_source <= 1 and num_sink <= 2:
            valid_group.append(each_group)
    return valid_group
def generate_method_bindings(obj):
    """Function to create the function calls, which contain calls to the godot apis"""
    result = "\n##################################Generated method bindings#########################################\n"
    result += f"cdef godot_method_bind *bind_{obj['name']}\n"
    for method in obj["methods"]:
        result += f"cdef godot_method_bind *bind_{obj['name'].lower()}_{method['name']}\n"
    result += f"cdef init_method_bindings_{obj['name']}():\n"
    result += f' bind_{obj["name"]} = api_core.godot_method_bind_get_method("Object", "_get")\n'
    for method in obj["methods"]:
        result += f" global bind_{obj['name'].lower()}_{method['name']}\n"
    for method in obj["methods"]:
        result += f""" bind_{obj['name'].lower()}_{method['name']} = api_core.godot_method_bind_get_method('{ obj['name']}', '{method['name']}')\n"""
    return result
def parse_interface_params(list):
    """
    Parse a variable list of key=value args into a dictionary
    suitable for kwarg usage
    """
    return {} if list is None else dict([s.split('=') for s in list])
def parse_enzyme_input(enzyme_input: str):
    """Parse the list of enzymes given as input."""
    enzyme_list = enzyme_input.strip(" ").split(",")
    return enzyme_list
def rim2arab(num):
    """Convert Roman numerals to Arabic numbers:
    L - 50
    X - 10
    V - 5
    I - 1

    :type num: str
    :rtype: int
    """
    res = 0
    lt = False
    ltx = False
    for c in num:
        if c == "L":
            if ltx:
                res += 30
            else:
                res += 50
            lt = False
            ltx = False
        elif c == "X":
            if lt:
                res += 8
            else:
                res += 10
            lt = False
            ltx = True
        elif c == "V":
            if lt:
                res += 3
            else:
                res += 5
            lt = False
            ltx = False
        elif c == "I":
            res += 1
            lt = True
            ltx = False
    return res
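# Quick check for rim2arab (the numeral strings are illustrative):
# subtractive forms like XIV and XL should be handled by the lt/ltx flags.
assert rim2arab("XIV") == 14 and rim2arab("XL") == 40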
def fix_sequence_length(sequence, length):
    """
    Function to check if length of sequence matches specified length and then return
    a sequence that's either padded or truncated to match the given length

    Args:
        sequence (str): the input sequence
        length (int): expected length

    Returns:
        str: string of length 'length'
    """
    # check if the sequence is smaller than expected length
    if len(sequence) < length:
        # pad the sequence with 'N's
        sequence += 'N' * (length - len(sequence))
    # check if the sequence is larger than expected length
    elif len(sequence) > length:
        # truncate to expected length
        sequence = sequence[:length]
    return sequence
def safe_dir(out_dir, year, month):
    """
    Build the path for this year and month as subdirectories of out_dir.

    :param out_dir: base directory in which new directories will be created
    :param year: year for subdirectory
    :param month: month for subdirectory
    :return: the constructed directory path
    """
    syr = str(year)
    smn = "{:02}".format(month)
    d2 = out_dir + '/' + syr + '/' + smn + '/'
    return d2
def get_alpha_beta(min_value, max_value, mean_value):
    """
    For the duration on a state, draw from a beta distribution with
    parameters alpha and beta.
    """
    x = (mean_value - min_value) / (max_value - min_value)
    z = 1 / x - 1
    a, b = 2, 2 * z
    return a, b
def generate_transition_bigram_probabilities(transition_unigram_counts, transition_bigram_counts):
    """Takes in the unigram and bigram count matrices.

    Creates dictionary containing the transition bigram probabilities in the following format:
        {(tag[i-1], tag[i]) : probability}
    where the probability is calculated by the following formula:
        probability((tag[i-1], tag[i])) = count((tag[i-1], tag[i])) / count(tag[i-1])
    """
    transition_bigram_probabilities = dict()
    for tag_bigram in transition_bigram_counts:
        transition_bigram_probabilities[tag_bigram] = \
            float(transition_bigram_counts[tag_bigram]) / transition_unigram_counts[tag_bigram[0]]
    return transition_bigram_probabilities
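# Small worked example for generate_transition_bigram_probabilities (toy counts, not from the source):
# P(NOUN | DET) = 2/2 = 1.0 and P(VERB | NOUN) = 1/2 = 0.5
unigrams = {'DET': 2, 'NOUN': 2}
bigrams = {('DET', 'NOUN'): 2, ('NOUN', 'VERB'): 1}
assert generate_transition_bigram_probabilities(unigrams, bigrams) == {('DET', 'NOUN'): 1.0, ('NOUN', 'VERB'): 0.5}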
def getCountTime(time):
    """ if the needed time is below 1/10 of a second """
    if time < 1:
        return (1 / 10) / 60
def fib(a):
    """
    Computes the Fibonacci series.

    :param a:
    :return:
    """
    prev = 1
    prevprev = 1
    while a > 0:
        tmp = prev + prevprev
        prevprev = prev
        prev = tmp
        a -= 1
    return prev
def realQuadratic(variableA=1, variableB=1, variableC=1) -> tuple:
    """
    Calculates solutions for a quadratic formula

    variableA: Ax^2 (int|float|complex)
    variableB: Bx (int|float|complex)
    variableC: C (int|float|complex)
    """
    inverseB = (-variableB)
    discriminant = ((variableB ** 2) - (4 * variableA * variableC))
    denominator = (2 * variableA)
    if variableA == 0:
        raise ZeroDivisionError
    else:
        solutionA = ((inverseB - (discriminant ** 0.5)) / denominator)
        solutionB = ((inverseB + (discriminant ** 0.5)) / denominator)
        return solutionA, solutionB
def get_first_positions(sequence):
    """
    Reports the first occurrence of each element in the sequence in a dictionary,
    with each element as keys, and their first position as values.

    Example
    ---------
    >>> sequence = [1,1,2,3,4]
    >>> ps.get_first_positions(sequence)
    {1: 0, 2: 2, 3: 3, 4: 4}
    """
    unique_elements = list(set(sequence))
    first_positions = {}
    for element in unique_elements:
        first_positions[element] = sequence.index(element)
    return first_positions
def media_for_creatable_guest_media(medias):
    """ This media should be used when creating a guest_media record."""
    for m in medias:
        if m.id == 'TEST FOR GUESTMEDIA CREATION':
            return m
def empty(n):
    """
    :param n: Size of the matrix to return
    :return: n by n matrix (2D array) filled with 0s
    """
    return [[0 for i in range(n)] for j in range(n)]
def parse_state(json_obj, state_key: str):
    """
    Retrieves the value of a state by the key of the state out of the JSON.

    :param json_obj: the processor's general state.
    :param state_key: the key for the specific state.
    :raises ValueError: if the passed key cannot be found in the processor state.
    :return: value of the matching key.
    """
    states = json_obj["componentState"]["localState"]["state"]
    for state in states:
        if state["key"] == state_key:
            value = state["value"]
            return value
    raise ValueError(f"Could not find {state_key} ")
def create_axisdic(adic, tlen, dlen):
    """
    Make a Sparky axis dictionary from a universal axis dictionary.

    Parameters:

    * adic  axis dictionary from universal dictionary
    * tlen  tile length
    * dlen  data length
    """
    dic = dict()
    dic["nucleus"] = adic["label"]
    dic["spectral_shift"] = 0
    dic["npoints"] = int(dlen)
    dic["size"] = int(dlen)
    dic["bsize"] = int(tlen)
    dic["spectrometer_freq"] = float(adic["obs"])
    dic["spectral_width"] = float(adic["sw"])
    dic["xmtr_freq"] = float(adic["car"]) / dic["spectrometer_freq"]
    dic["zero_order"] = 0.0
    dic["first_order"] = 0.0
    dic["first_pt_scale"] = 0.0
    dic["extended"] = '\x80'  # transform bit set
    return dic
def Binary_search(data, target, low, high):
    """Return True if element found at the indicated position of a list sequence.

    The search only considers the positions from data[low] to data[high] inclusive."""
    if low > high:
        return False
    else:
        # parenthesise before integer division so the midpoint is correct
        mid = (low + high) // 2
        if target == data[mid]:
            return True
        elif target < data[mid]:
            # recur on the left portion of list
            return Binary_search(data, target, low, mid - 1)
        else:
            # recur on the right side of list
            return Binary_search(data, target, mid + 1, high)
def item_hist(list_):
    """Counts the number of times each item appears in the list."""
    dict_hist = {}
    # Insert each item into the correct group
    for item in list_:
        if item not in dict_hist:
            dict_hist[item] = 0
        dict_hist[item] += 1
    return dict_hist
def result_saving(text_name, image_name, folder_name, results):
    """ file processing part """
    # save estimation values
    file = open(folder_name + text_name, 'a')
    file.write('name' + ' ' + 'cos' + ' ' + 'dice' + ' ' + 'jaccard' + ' '
               + 'pearson' + ' ' + 'tanimoto' + '\n')
    file.write(image_name + ':' + ' ')
    file.write(str(results[0]) + ' ')
    file.write(str(results[1]) + ' ')
    file.write(str(results[2]) + ' ')
    file.write(str(results[3]) + ' ')
    file.write(str(results[4]) + '\n')
    file.write('\n')
    file.close()
    return None
def _get_full_bldg_pdf_url(building_id, base_url):
    # type: (int, str) -> str
    """Get report pdf url for full buildings

    BES does not include the url to the report.pdf in the score response for v1
    full buildings, but does follow a consistent pattern for url creation in
    relation to the production or sandbox api url. ie:
    https://api.labworks.org/buildings/{building id}/report.pdf or
    https://buildingenergyscore.energy.gov/buildings/{building id}/report.pdf
    """
    pdf_url_format = "{}/buildings/{}/report.pdf"
    # remove a trailing '/api' suffix if present (str.rstrip would strip
    # individual characters, not the suffix)
    if base_url.endswith('/api'):
        base_url = base_url[:-len('/api')]
    return pdf_url_format.format(base_url, building_id)
def font_size(min, max, high, current_value):
    """
    Calculate font size by min and max occurrences.

    min - minimum output value
    max - maximum output value
    high - maximum input value
    current_value - current occurrences
    """
    if max < min:
        raise ValueError('Max cannot be less than Min')
    if current_value > high:
        raise ValueError('current_value cannot be greater than high_value')
    return int((float(current_value) / high) * (max - min) + min)
def test_func(context):
    """
    OCID thread function for testing purposes.

    Parameters
    ----------
    context : dict
        The thread context.

    Returns
    -------
    dict
        The new context.
    """
    if 'fname' not in context:
        raise ValueError("fname must be defined in the context")
    if 'counter' not in context:
        raise ValueError("counter must be defined in the context")
    with open(context['fname'], "w+") as f:
        f.write("hello %d\n" % context['counter'])
    context['counter'] += 1
    return context
def flip_1d_index_horizontally(index, rows, columns):
    """Finds the index to the corresponding horizontal-flipped 1d matrix value.

    Consider a 1d matrix [1, 2, 3, 4, 5, 6] with 3 rows and 2 columns.
    The original and horizontally flipped representations are shown below.

        1 2      2 1
        3 4  ->  4 3
        5 6      6 5

    This function allows the translated matrix to be accessed using indices into
    the original matrix, such that index 0 becomes index 1 (value 2, corresponding
    to index 0 of the flipped matrix).

    Args:
        index (int): Index into the original 1d matrix.
        rows (int): Number of rows in the matrix.
        columns (int): Number of columns in the matrix.

    Returns:
        The index for the corresponding value of the horizontally flipped matrix,
        as an int.
    """
    # Get current row in 1d matrix, from 0 to rows-1
    current_row = index // columns
    # Get current column in 1d matrix, from 0 to columns-1
    current_column = index % columns
    # Flip column (0 -> columns-1, ..., columns-1 -> 0, etc.)
    flipped_column = columns - current_column - 1
    # Calculate total number of entries on preceding rows
    offset_row = current_row * columns
    return offset_row + flipped_column
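# Index-flip sanity check for flip_1d_index_horizontally, using the 3x2 matrix from the docstring:
# original index 0 maps to 1, 1 maps to 0, 2 maps to 3, and so on.
assert [flip_1d_index_horizontally(i, 3, 2) for i in range(6)] == [1, 0, 3, 2, 5, 4]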
def inverse_mod(a, p):
    """
    Compute the modular inverse of a (mod p)

    :param a: An integer
    :param p: An integer
    :return: An integer
    """
    if a < 0 or p <= a:
        a = a % p

    # From Ferguson and Schneier, roughly:
    c, d = a, p
    uc, vc, ud, vd = 1, 0, 0, 1
    while c != 0:
        q, c, d = divmod(d, c) + (c,)
        uc, vc, ud, vd = ud - q * uc, vd - q * vc, uc, vc

    # At this point, d is the GCD, and ud*a+vd*p = d.
    # If d == 1, this means that ud is a inverse.
    assert d == 1
    if ud > 0:
        return ud
    else:
        return ud + p
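# Modular-inverse sanity check for inverse_mod (small prime modulus chosen for illustration):
# 3 * 5 = 15 = 1 (mod 7), so the inverse of 3 mod 7 is 5.
assert inverse_mod(3, 7) == 5
assert (3 * inverse_mod(3, 7)) % 7 == 1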
def split(s):
    """Return a list of words contained in s, which are sequences of characters
    separated by whitespace (spaces, tabs, etc.).

    >>> split("It's a lovely day, don't you think?")
    ["It's", 'a', 'lovely', 'day,', "don't", 'you', 'think?']
    """
    return s.split()
def remove_whitespace(line_content, old_col):
    """
    Removes white spaces from the given line content.

    This function removes white spaces from the line content parameter and
    calculates the new line location. Returns the line content without white
    spaces and the new column number.

    E.g.:
      line_content = "  int foo = 17; sizeof(43);  "
                                      ^
                                      |- bug_col = 18

      content_begin = "  int foo = 17; "
      content_begin_strip = "intfoo=17;"
      line_strip_len = 18 - 10 => 8
      ''.join(line_content.split()) => "intfoo=17;sizeof(43);"
                                                 ^
                                                 |- until_col - line_strip_len
                                                    18 - 8 = 10
    """
    content_begin = line_content[:old_col]
    content_begin_strip = ''.join(content_begin.split())
    line_strip_len = len(content_begin) - len(content_begin_strip)

    return ''.join(line_content.split()), \
        old_col - line_strip_len
def chunks(data, block_size):
    """Split data to list of chunks"""
    return [data[0 + i:block_size + i] for i in range(0, len(data), block_size)]
def show_video(video, top_video=False):
    """
    API-call-free, responsive friendly video embed.

    Don't forget to use it with the JS that tells Firefox not to use native
    video, and responsifying CSS.
    """
    return {
        'video': video,
        'top_video': top_video
    }
def nonrigid_tors(spc_mod_dct_i, rotors):
    """Determine if a nonrigid torsional model is specified and further
    information is needed from the filesystem.
    """
    vib_model = spc_mod_dct_i['vib']['mod']
    tors_model = spc_mod_dct_i['vib']['mod']
    has_tors = bool(any(rotors))
    tors_hr_model = bool(
        tors_model in ('1dhr', '1dhrf', '1dhrfa', 'mdhr', 'mdhrv'))
    tau_hr_model = bool(tors_model == 'tau' and vib_model != 'vib')
    return has_tors and (tors_hr_model or tau_hr_model)
def check_bounds(master_results, subproblem_results):
    """Compare upper and lower bounds - return gap in absolute and % terms
    (% difference relative to lower bound)."""
    gap, gap_pct = None, None
    return gap, gap_pct
def longest_common_substring(s1, s2):
    """
    returns the longest common substring of two strings

    :param s1: a string
    :type s1: str
    :param s2: a second string
    :type s2: str

    >>> longest_common_substring('hello world how is foo bar?', 'hello daniel how is foo in the world?')
    ' how is foo '
    """
    m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]  # @UnusedVariable
    longest, x_longest = 0, 0
    for x in range(1, 1 + len(s1)):
        for y in range(1, 1 + len(s2)):
            if s1[x - 1] == s2[y - 1]:
                m[x][y] = m[x - 1][y - 1] + 1
                if m[x][y] > longest:
                    longest = m[x][y]
                    x_longest = x
            else:
                m[x][y] = 0
    return s1[x_longest - longest: x_longest]