content
stringlengths
42
6.51k
def f_measure(precision, recall):
    """Return the F1-score (harmonic mean) of *precision* and *recall*.

    :param precision: the precision value
    :type precision: float
    :param recall: the recall value
    :type recall: float
    :return: F1-score, or 0.0 when both inputs are 0.0
    """
    if precision == 0 and recall == 0:
        # avoid dividing by zero when both measures vanish
        return 0.0
    return 2.0 * precision * recall / (precision + recall)
def must_separate(nodes, page_tree):
    """Given a sequence of nodes and a PageTree, return pairs (src, tgt) of
    nodes where one is the ascendant/descendant of the other — i.e. tgt lies
    strictly between src and its match in the tree."""
    pairs = []
    for src in nodes:
        match = page_tree.match[src]
        if match < 0:
            continue  # src has no match: nothing to separate
        pairs.extend((src, tgt) for tgt in range(src + 1, match) if tgt in nodes)
    return pairs
def get_header(line):
    """Return (author, urgency, timestamp) parsed from the first line.

    The line is expected to carry ``[author]``/``[green]`` style tags; on any
    parse failure the placeholder triple ("---", "---", "---") is returned.
    """

    # delete empty str elements from list
    def _de(olist):
        return [item for item in olist if item != '']

    try:
        # replace bracket tag strings with , so fields can be split apart
        slist = ['[author]', '[/author]', '[green]', '[/green]']
        for sitem in slist:
            line = line.replace(sitem, ',')
        # split author from rest of line
        author, rline = _de(line.split(',,'))
        urgency, date, time = _de(rline.split(' '))
        timestamp = date + ' ' + time
        return author, urgency, timestamp
    except (ValueError, AttributeError):
        # malformed line (wrong number of fields / not a string);
        # the original bare `except:` also hid unrelated bugs.
        return "---", "---", "---"
def get_function_parameters(function):
    """Return the query parameters documented on *function* via
    api.decorators.query_arg (read from its ``doc_query`` attribute)."""
    return [
        {
            'name': name,
            'description': description,
            'type': return_type,
            'paramType': 'query',
            'required': bool(required),
        }
        for name, description, return_type, required in getattr(function, 'doc_query', [])
    ]
def charValue(char):
    """Map 'A'-'Z' to 0-25 and 'a'-'z' to 26-51.

    :param char: single character to convert
    :return: the integer value of the character, or None for other characters
    """
    if 'A' <= char <= 'Z':
        return ord(char) - ord('A')
    if 'a' <= char <= 'z':
        # lowercase letters follow the uppercase range: 'a' -> 26
        return ord(char) - ord('a') + 26
def bevel_2d(point):
    """Return a bitmask of the twelve bevel-edge planes point P lies outside of.

    Each axis pair (x,y), (x,z), (y,z) contributes four plane tests in the
    order (+a+b >= 1), (+a-b >= 1), (-a+b > 1), (-a-b > 1), filling bits
    0x001 upward.
    """
    code = 0
    bit = 0x001
    for a, b in ((0, 1), (0, 2), (1, 2)):
        pa, pb = point[a], point[b]
        # note the asymmetry: '+' combinations use >=, '-' combinations use >
        for outside in (pa + pb >= 1.0, pa - pb >= 1.0,
                        -pa + pb > 1.0, -pa - pb > 1.0):
            if outside:
                code |= bit
            bit <<= 1
    return code
def match(parent, child):
    """Return the fraction of positions where *child* matches *parent*.

    :param parent: str, the sequence to match against
    :param child: str, the sequence being compared (parent must be at least
        as long as child)
    :return: float, ratio of matching positions to len(child)
    """
    length = len(child)
    hits = sum(1 for i in range(length) if child[i] == parent[i])
    return hits / length
def back_indel_shift(info_index_list, cur_index) -> int:
    """Return the accumulated shift (back_indel_shift).

    Args:
        info_index_list (list/tuple): (start_index, length) entries generated
            from align.cigar tuples (soft clips and insertions)
        cur_index (int): index related to the MD tag in a BAM file

    Returns:
        int: accumulated shift for entries that start before cur_index
    """
    acc_shift = 0
    for info_start_index, info_len in info_index_list:
        if info_start_index >= cur_index:
            break  # entries are ordered; nothing further applies
        acc_shift += info_len
    return acc_shift
def fix_label(label):
    """Split *label* over two lines.

    Args:
        label (str): Phrase to be split.

    Returns:
        str: label with the first space after its midpoint replaced by a newline.
    """
    mid = len(label) // 2
    head, tail = label[:mid], label[mid:]
    # only the first space in the second half becomes a line break
    return head + tail.replace(' ', '\n', 1)
def get_solute_data_from_db(solute_spc, db):
    """Look up *solute_spc* in the RMG solute library.

    Returns a (solute_data, comment) tuple; both None when solute_spc is None,
    and (None, 'Not found...') when the library has no entry.
    """
    if solute_spc is None:
        return None, None
    data = db.get_solute_data_from_library(solute_spc, db.libraries['solute'])
    if data is None:
        return None, 'Not found in RMG-database'
    entry = data[2]
    comment = f'From RMG-database solute library: {entry.index}. {entry.label}'
    return data[0], comment
def cleanpath(name):
    """jref$n4e12510j_crr.fits --> n4e12510j_crr.fits"""
    if "ref$" in name:
        # keep only the part after the last '$'
        return name.split("$")[-1].strip()
    if name.startswith("crds://"):
        return name[len("crds://"):]
    return name
def _flatten_dict(xs): """Flatten a nested dictionary. The nested keys are flattened to a tuple. Example:: xs = {'foo': 1, 'bar': {'a': 2, 'b': {}}} flat_xs = flatten_dict(xs) print(flat_xs) # { # ('foo',): 1, # ('bar', 'a'): 2, # } Note that empty dictionaries are ignored and will not be restored by `unflatten_dict`. Args: xs: a nested dictionary Returns: The flattened dictionary. """ assert isinstance(xs, dict), 'input is not a dict' def _flatten(xs, prefix): if not isinstance(xs, dict): return {prefix: xs} result = {} for key, value in xs.items(): path = prefix + (key,) result.update(_flatten(value, path)) return result return _flatten(xs, ())
def quote(s):
    """Escape '/', '_' and ':' in primary-key values used in admin URLs.

    Similar to urllib.quote, but quoted differently so browsers do not
    automatically unquote the result. Non-string values pass through.
    """
    if type(s) != type(''):
        return s
    # replace each reserved character with _HH (hex of its code point)
    return ''.join('_%02X' % ord(ch) if ch in ':/_' else ch for ch in s)
def file_length_checker(file_name):
    """Check that the file has at most 1000 lines.

    :param file_name: path of the file to be checked
    :return: True if the file contains 1000 lines or fewer, False otherwise
    """
    # Use a context manager so the handle is always closed
    # (the original implementation leaked the file handle).
    with open(file_name, "r") as result:
        counter = 0
        for _ in result:
            counter += 1
            if counter > 1000:
                return False  # early exit: no need to read the rest
    return True
def hash(s, const):
    """Calculate the polynomial hash value of a string using base *const*.

    Example: hash('abc', base) == 97*base**2 + 98*base**1 + 99*base**0

    @param s value to compute hash value for
    @param const int base used to compute the hash value
    @return hash value
    """
    # Horner's rule: the first character receives the highest power, matching
    # the docstring example.  The previous implementation used int(s[i]),
    # which raised ValueError on any non-digit character (including the
    # documented 'abc' example) and assigned the powers in reverse.
    value = 0
    for ch in s:
        value = value * const + ord(ch)
    return value
def find_key(dict_obj, key):
    """Collect every value stored under *key* anywhere inside *dict_obj*.

    Supports nested dictionaries and lists of dictionaries.

    Arguments:
        dict_obj (obj): A list or a dictionary
        key (str): dictionary key

    Return:
        (list): values that match the key (may be empty)

    Example:
        >>> find_key({"A1": "A", "B1": {"A2": "AA"}}, "A2")
        ['AA']
        >>> find_key({"A1": "A", "B1": {"A1": "AA"}}, "A1")
        ['A', 'AA']
        >>> find_key({"A1": "A"}, "YY")
        []
    """
    matches = []
    if isinstance(dict_obj, dict):
        for k, v in dict_obj.items():
            if k == key:
                matches.append(v)
            else:
                # recurse: the value itself may hold nested dicts/lists
                matches += find_key(v, key)
    elif isinstance(dict_obj, list):
        for element in dict_obj:
            matches += find_key(element, key)
    return matches
def depth_first_flatten(task, task_array=None):
    """Depth-first flatten the child tasks of the given task.

    :param task: start from this task
    :param task_array: previously flattened task array (internal accumulator)
    :return: list of flat tasks
    """
    if task_array is None:
        task_array = []
    if not task:
        return task_array
    if task not in task_array:
        task_array.append(task)
    # take a tour in children, recursing into each
    for child_task in task.children:
        task_array.append(child_task)
        task_array = depth_first_flatten(child_task, task_array)
    return task_array
def _extract_variants(address, variants_str): """Return the variants (if any) represented by the given variants_str. :returns: The variants or else `None` if there are none. :rtype: tuple of tuples (key, value) strings """ def entries(): for entry in variants_str.split(','): key, _, value = entry.partition('=') if not key or not value: raise ValueError('Invalid variants after the @ in: {}'.format(address)) yield (key, value) return tuple(entries())
def _limit(value, min_value, max_value): """Limit value by min_value and max_value.""" if value < min_value: return min_value if value > max_value: return max_value return value
def get_ids_from_j(j, n, control_string, is_numerator):
    """Get the ids of the vector items.

    The id is the binary prefix (control_string plus the numerator flag)
    extended with every possible (n - j)-bit suffix.
    """
    prefix = control_string + str(int(is_numerator))
    remaining = n - j
    if remaining <= 0:
        return [int(prefix, 2)]
    suffix_fmt = "{:0" + str(remaining) + "b}"
    return [int(prefix + suffix_fmt.format(i), 2) for i in range(2 ** remaining)]
def xxxxxx(x, y, z):
    """Return x when z is truthy, otherwise y.

    NOTE: the declared signature may promise a float while x is an int;
    callers (e.g. PyXLL) are expected to coerce the result.
    """
    return x if z else y
def get_dict_value(adict, key, prefix=None, as_array=False, splitter='.'):
    """Look up a dotted *key* (e.g. "a.b.c") in a nested dict/list structure.

    :param adict: dict, or list of dicts, to search in
    :param key: dotted path to the value; split on *splitter*
    :param prefix: internal accumulator — the remaining path components
        (callers normally leave this as None)
    :param as_array: when True, collect matches from every list element and
        return them as a list; when False, only the first list element is
        consulted
    :param splitter: character separating the path components
    :return: the found value, a list of values (as_array), or None
    """
    if prefix is None:
        # first call: turn the dotted key into a list of path components
        prefix = key.split(splitter)
    if len(prefix) == 1:
        # base case: one component left — read it directly
        if type(adict) == type({}):
            if not prefix[0] in adict.keys():
                return None
            if as_array:
                return [adict[prefix[0]], ]
            return adict[prefix[0]]
        elif type(adict) == type([]):
            if as_array:
                # gather the component from every element that has it
                result = []
                for v in adict:
                    if prefix[0] in v.keys():
                        result.append(v[prefix[0]])
                return result
            else:
                # only the first element is consulted when not as_array
                if len(adict) > 0 and prefix[0] in adict[0].keys():
                    return adict[0][prefix[0]]
                return None
    else:
        # recursive case: descend one component and continue with the rest
        if type(adict) == type({}):
            if prefix[0] in adict.keys():
                return get_dict_value(adict[prefix[0]], key, prefix=prefix[1:], as_array=as_array)
        elif type(adict) == type([]):
            if as_array:
                result = []
                for v in adict:
                    res = get_dict_value(v[prefix[0]], key, prefix=prefix[1:], as_array=as_array)
                    # NOTE(review): falsy results (e.g. 0, "") are dropped here
                    if res:
                        result.extend(res)
                return result
            else:
                return get_dict_value(adict[0][prefix[0]], key, prefix=prefix[1:], as_array=as_array)
    return None
def max_sub_array(nums):
    """Return the maximum subarray sum of *nums* (Kadane's algorithm).

    Returns 0 if nums is None or an empty list.  If every element is
    non-positive, the largest single element is returned.

    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    # `not nums` covers both None and the empty list; the original compared
    # with `== None`, which is non-idiomatic (PEP 8: use `is None`).
    if not nums:
        return 0
    max_sum = 0
    current_sum = 0
    for num in nums:
        # either extend the running subarray or start fresh at `num`
        current_sum = max(current_sum + num, num)
        max_sum = max(current_sum, max_sum)
    if max_sum <= 0:
        # all elements non-positive: best subarray is the single largest item
        return max(nums)
    return max_sum
def __compute_num_layers(bpp, extra_bits, layer_bits=64): """ Computes the number of 'layers' required to store the original image with bpp bits-per-pixel along with the results of len(extra_bits) filtered versions of the image, each requiring bpp + extra_bits[i] bits. The filters are done in that order and each layer can have at most layer_bits bits (defaults to 64). """ nlayers, shift = 1, layer_bits-bpp for ex_bits in extra_bits: shift -= bpp + ex_bits if shift < 0: nlayers += 1 shift = layer_bits - bpp - ex_bits return nlayers
def check_output_options(input,):
    """Validate the output_options dictionary and fill in defaults.

    Recognised keywords:
      - write_chain (bool, default False): write MCMC chains for all
        parameters, fluxes and luminosities to a FITS table.  Off by default
        because the MCMC_chains.FITS file can become very large.
      - print_output (bool, default True): print fitting-process steps in
        the Jupyter output.

    :param input: user-supplied options dict (may be None/empty)
    :return: dict with validated 'write_chain' and 'print_output' keys
    :raises TypeError: when a supplied value is not a bool/int
    """
    defaults = {'write_chain': False, 'print_output': True}
    if not input:
        return dict(defaults)
    output = {}
    for key, default in defaults.items():
        if key in input:
            value = input[key]
            if not isinstance(value, (bool, int)):
                raise TypeError('\n {} must be set to "True" or "False" \n'.format(key))
            output[key] = value
        else:
            output[key] = default
    return output
def augment_lab_config(lab_config):
    """Augment the configuration by adding, to each port that has a cable
    plugged in, a "connected_to" attribute (the peer endpoint) and a "cable"
    attribute (the cable record itself).

    Args:
        lab_config (dict): lab configuration with "devices" and "cables"

    Returns:
        dict: the same lab_config, mutated in place
    """
    devices = lab_config["devices"]
    for cable in lab_config["cables"]:
        # annotate both ends of the cable symmetrically
        for end, peer in (("source", "destination"), ("destination", "source")):
            port = devices[cable[end]["device"]]["ports"][cable[end]["port"]]
            port["connected_to"] = cable[peer]
            port["cable"] = cable
    return lab_config
def get_bitfinex_api_url(url: str, pagination_id: int) -> str:
    """Get the Bitfinex API URL, appending an ``end`` query parameter when
    a pagination id is given."""
    return f"{url}&end={pagination_id}" if pagination_id else url
def validate_password(value):
    """Return 'Valid' when *value* is an acceptable password, otherwise an
    appropriate error message."""
    if not value:
        return 'Please enter password.'
    if len(value) < 8:
        return 'Password must be at least 8 characters.'
    return 'Valid'
def get_skiprows(workbook, sheets):
    """For each sheet number, find the first row containing ACENAPHTHENE (or
    #ACENAPHTHENE) and record the preceding row indices to skip.

    Headers can't be automatically derived, hence the marker search.
    """
    skiprows = {}
    for sheetnum in sheets:
        # sheet 37 carries the marker in the second column
        col = 1 if sheetnum == 37 else 0
        sheet = workbook.worksheets[sheetnum]
        for i, row in enumerate(sheet.rows):
            try:
                if 'ACENAPHTHENE' in row[col].value:
                    skiprows[sheetnum] = list(range(i))
                    break
            except TypeError:
                # cell value is None (or not a string): not the marker row
                pass
    return skiprows
def apply_penalty(tensor_or_tensors, penalty, **kwargs):
    """Total cost of applying *penalty* to a tensor or a group of tensors.

    Parameters
    ----------
    tensor_or_tensors : Theano tensor or list of tensors
    penalty : callable
    **kwargs
        keyword arguments passed to penalty.

    Returns
    -------
    Theano scalar
        a scalar expression for the total penalty cost
    """
    try:
        # assume an iterable of tensors and sum the per-tensor penalties
        return sum(penalty(t, **kwargs) for t in tensor_or_tensors)
    except (TypeError, ValueError):
        # fall back: a single tensor was supplied
        return penalty(tensor_or_tensors, **kwargs)
def get_payload_bits(payload_bytes):
    """Return a flat list of bits from a list of bytes, most-significant
    bit first within each byte.

    Keyword arguments:
    payload_bytes -- a list of bytes
    """
    bits = []
    for byte in payload_bytes:
        bits.extend((byte >> shift) & 1 for shift in range(7, -1, -1))
    return bits
def split_crc_idx(idx):
    """Split a crc index into a (tem, ccc, rc) tuple of integers."""
    # Floor division is required: under Python 3 the original `/` produced
    # floats instead of the integer indices the callers expect.
    return (idx // 16, (idx // 4) % 4, idx % 4)
def time_formatter(seconds: float) -> str:
    """Humanize a duration in seconds, e.g. 3661 -> "1h, 1m, 1s"."""
    minutes, seconds = divmod(int(seconds), 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    # only non-zero components appear in the output
    parts = ["{}{}".format(amount, unit)
             for amount, unit in ((days, "d"), (hours, "h"), (minutes, "m"), (seconds, "s"))
             if amount]
    return ", ".join(parts)
def svd_columns(feature_count, list_of_signals=None):
    """Create a dictionary of SVD column names for each signal.

    Parameters
    ===========
    feature_count : int
        The number of singular-value-decomposition values to be included
        for each signal

    list_of_signals : list, optional
        Signal names for which the SVD values should be calculated.
        Defaults to ["current_sub", "current_main"].

    Returns
    ===========
    svd_feature_dictionary : dict
        Dictionary with the column names as keys and [signal, index] values.
    """
    # Avoid the mutable-default-argument pitfall of the original signature.
    if list_of_signals is None:
        list_of_signals = ["current_sub", "current_main"]
    return {
        "svd_{}_{}".format(signal, idx): [signal, idx]
        for signal in list_of_signals
        for idx in range(feature_count)
    }
def probability(yes, no=1):
    """Convert odds to a probability.

    Example: yes=2, no=1 means 2:1 odds in favor, i.e. 2/3 probability.

    yes, no: int or float odds in favor
    """
    return yes / float(yes + no)
def get_scale_factor(scale, max_size, img_h, img_w):
    """Compute the resize factor for test-time scaling.

    :param scale: min size during test
    :param max_size: max size during test
    :param img_h: orig height of img
    :param img_w: orig width
    :return: scale factor for resizing (1.0 for degenerate inputs)
    """
    short_side = min(img_w, img_h)
    long_side = max(img_w, img_h)
    if short_side <= 0 or scale <= 0:
        return 1.0
    # scale the short side to `scale`, but never let the long side exceed max_size
    return min(scale / short_side, max_size / long_side)
def delete_allocation_method(dataset):
    """Delete key ``allocation method`` if present"""
    # pop with a default removes the key without raising when absent
    dataset.pop("allocation method", None)
    return [dataset]
def get_accepted_parameters(response_list):
    """
    Returns a dictionary specifying the highest supported encryption and
    hashing standard that both users support.
    """
    parameters_dict = {'hashing': 0, 'encryption': 0}
    if response_list is None:  # idiomatic identity check (was `== None`)
        return parameters_dict
    hashing_list = response_list[-1].split(' ')
    encryption_list = response_list[-2].split(' ')
    if len(hashing_list) > 1 and hashing_list[0].lower().startswith('hashing'):
        try:
            # scan from the end for the first standard below the cap of 5
            for i in range(len(hashing_list) - 1, 1, -1):
                if int(hashing_list[i][0]) < 5:
                    parameters_dict['hashing'] = int(hashing_list[i][0])
                    break
        except (ValueError, IndexError):
            # malformed token: keep the default 0 (the original bare
            # `except:` also swallowed unrelated errors)
            pass
    if len(encryption_list) > 1 and encryption_list[0].lower().startswith('encryption'):
        try:
            # scan from the end for the first standard below the cap of 3
            for i in range(len(encryption_list) - 1, 1, -1):
                if int(encryption_list[i][0]) < 3:
                    parameters_dict['encryption'] = int(encryption_list[i][0])
                    break
        except (ValueError, IndexError):
            pass
    return parameters_dict
def startswith_whitespace(text):
    """Return True when *text* is a string starting with whitespace.

    Non-string input (and the empty string) yields False.
    """
    # text[:1] is '' for empty strings, and ''.isspace() is False
    return isinstance(text, str) and text[:1].isspace()
def CeilNeg(n):
    """
    Rounds up the provided number.

    :param n: The number to round; must be negative
    :return: The result of math.ceil(n)
    """
    # For negative n, truncation toward zero IS the ceiling:
    # int(-2.5) == -2 == math.ceil(-2.5).  The previous `int(n - 1)`
    # branch computed the floor instead, contradicting the docstring.
    return int(n)
def ip_str_from4bytes(s: bytes) -> str:  # b'\xc0\xa8\xfa\xe5' => 192.168.250.229
    """Convert a 4-byte bytestring to the dotted-quad IPv4 string.

    Args:
        s: source bytestring (only the first four bytes are used)

    Returns:
        IPv4 address in traditional notation
    """
    return "{}.{}.{}.{}".format(s[0], s[1], s[2], s[3])
def processText(text):
    """Debug helper: print the type of *text* and each of its lines, then
    return it unchanged.

    :param text: text to inspect, as a list of lines
    :return: the same text, unmodified
    """
    print(type(text))
    for entry in text:
        print(entry)
    return text
def list_max(lst):
    """Find the item with maximal value, ignoring None entries.

    :param lst: List to search
    :return: (index, value) pair; (None, None) when the list is empty or
        contains only None items
    """
    best_idx, best_val = None, None
    for i, val in enumerate(lst):
        if val is None:
            continue  # None entries never count as candidates
        if best_val is None or best_val < val:
            best_idx, best_val = i, val
    return best_idx, best_val
def url_join(domain, *parts):
    """Construct a url from a domain and path parts.

    :raises Exception: if the domain or any part is an empty string
        (or a lone space).
    """
    # The original check only rejected " " even though its error message
    # promised to reject empty strings; test for both.
    for piece in (domain,) + parts:
        if piece in ("", " "):
            raise Exception("empty string is not allowed in url!")
    # strip a single trailing slash from the domain so joining is clean
    if domain.endswith("/"):
        domain = domain[:-1]
    segments = [domain]
    for part in parts:
        for piece in part.split("/"):
            if piece.strip():
                segments.append(piece)
    return "/".join(segments)
def popravi_datum(niz):
    """Convert a yyyy-mm-dd string into dd. mm. yyyy."""
    year, month, day = niz.split("-")
    return "{}. {}. {}".format(day, month, year)
def norm(value_string):
    """Normalize an RTE pair's ``value``/``entailment`` label to an int.

    :param value_string: the label used to classify a text/hypothesis pair
    :type value_string: str
    :rtype: int
    """
    return {"TRUE": 1, "YES": 1, "FALSE": 0, "NO": 0}[value_string.upper()]
def quintic_easein(pos):
    """Easing function for animations: Quintic Ease In."""
    return pos ** 5
def create_combination(list_of_sentences):
    """Generate all unordered pair combinations of the input sentences.

    For example:
      input = ["p1", "p2", "p3"]
      output = [("p1", "p2"), ("p1", "p3"), ("p2", "p3")]

    Args:
      list_of_sentences: the list of input sentences.

    Returns:
      the list of all possible sentence pairs.
    """
    total = len(list_of_sentences)
    return [
        (list_of_sentences[i], list_of_sentences[j])
        for i in range(total)
        for j in range(i + 1, total)
    ]
def __weighted_avg(samples, weight_table, n): """ Averages by weight :param samples: the samples to average :param weight_table: the weight table, if None it's a normal average :param n: the number of samples :return: the average """ return sum(x * (weight_table[i] if weight_table is not None else 1 / n) for i, x in enumerate(samples))
def match_metadata(table_load_jobs, commit_jobs):
    """
    Attempts to match table load jobs with commit jobs via metadata version.

    :param table_load_jobs: list(Job); list of table load Jobs
    :param commit_jobs: list(Job); list of commit jobs (mutated: matched
        entries are removed)
    :return dict;
        {
            'matches': {load_job: [commit_job, ...]},
            'unmatched': {
                'load_jobs': list(load_job),
                'commit_jobs': list(commit_job)
            }
        }
    """
    to_return = {
        'matches': dict(),
        'unmatched': {
            'load_jobs': list(),
            'commit_jobs': list()
        }
    }
    for tl_job in table_load_jobs:
        tl_metadata = tl_job.details.metadataVersion
        found = list()
        for count in range(len(commit_jobs)):
            commit_job = commit_jobs[count]
            if commit_job.details.metadataVersion == tl_metadata:
                to_return['matches'].setdefault(tl_job, list()).append(commit_job)
                found.append(count)
        if not found:
            to_return['unmatched']['load_jobs'].append(tl_job)
        else:
            # Delete from highest index to lowest: the original ascending
            # deletion shifted the remaining indices, removing the wrong
            # elements (or raising IndexError).
            for idx in reversed(found):
                del commit_jobs[idx]
    # save whatever is left as unmatched
    to_return['unmatched']['commit_jobs'] = commit_jobs
    return to_return
def unique(seq):
    """Remove duplicates from a list while preserving order.

    :param seq: a python list object (items must be hashable).
    :return: a new list without duplicates, first-seen order preserved.
    """
    # dict preserves insertion order (Python 3.7+), so fromkeys performs
    # de-duplication and order preservation in a single C-level pass.
    return list(dict.fromkeys(seq))
def incident_priority_to_dbot_score(score: float) -> int:
    """Convert a CyberArk score (0.0-100.0) to a DBot score (1, 2 or 3).

    0.0 - 35.0  -> 1
    35.1 - 75.0 -> 2
    75.1 - 100.0 -> 3
    Out-of-range values map to 0.
    """
    if not 0 <= score <= 100:
        return 0
    if score <= 35:
        return 1
    if score <= 75:
        return 2
    return 3
def mint(string):
    """Convert a numeric field into an int; "-" and "" map to None."""
    if string in ("-", ""):
        return None
    return int(string)
def substance_name_match(name, substance):
    """Check whether *name* (case-insensitively) matches the substance's
    "name", "pretty_name", or any of its "aliases"."""
    lower_name = name.lower()
    # Generator expressions let any() short-circuit; the original built
    # full intermediate lists before testing (flake8-comprehensions C419).
    if any(lower_name == substance[key].lower()
           for key in ("name", "pretty_name") if key in substance):
        return True
    return any(lower_name == alias.lower()
               for alias in substance.get("aliases", []))
def true_solar_time(hour_of_day: float, local_time_constant: float, time_ekvation: float) -> float:
    """True solar time in hours - DK: Sand soltid"""
    # both corrections are given in minutes, hence the division by 60
    correction_hours = (local_time_constant - time_ekvation) / 60
    return hour_of_day + correction_hours
def starts_with_vowel(word):
    """Check for pronoun compatibility -- 'a' vs. 'an'.

    Returns False for an empty string (the original raised IndexError).
    Note: comparison is case-sensitive, matching the original behavior.
    """
    return bool(word) and word[0] in 'aeiou'
def modify(boxes, modifier_fns):
    """ Modifies boxes according to the modifier functions.

    Args:
        boxes (dict or list): Dictionary containing box objects per image ``{"image_id": [box, box, ...], ...}`` or list of bounding boxes
        modifier_fns (list): List of modifier functions that get applied

    Returns:
        (dict or list): boxes after modifications

    Warning:
        These modifier functions will mutate your bounding boxes and some of them can even remove bounding boxes.
        If you want to keep a copy of your original values, you should pass a copy of your bounding box dictionary:

        >>> import copy
        >>> import brambox.boxes as bbb
        >>>
        >>> new_boxes = bbb.modify(copy.deepcopy(boxes), [modfier_fns, ...])
    """
    if isinstance(boxes, dict):
        for _, values in boxes.items():
            # iterate backwards so deleting the current element is safe
            for i in range(len(values)-1, -1, -1):
                for fn in modifier_fns:
                    values[i] = fn(values[i])
                    # a modifier returning None removes the box entirely;
                    # skip the remaining modifiers for this box
                    if values[i] is None:
                        del values[i]
                        break
    else:
        # same logic for a flat list of boxes
        for i in range(len(boxes)-1, -1, -1):
            for fn in modifier_fns:
                boxes[i] = fn(boxes[i])
                if boxes[i] is None:
                    del boxes[i]
                    break
    return boxes
def derivative(v1, v0, t1, t0):
    """Approximate the derivative dv/dt via the difference quotient.

    A non-positive time delta yields 0 instead of dividing by zero.
    """
    dt = t1 - t0
    return (v1 - v0) / dt if dt > 0 else 0
def flatten(value):
    """Flatten one level of nesting: a list of lists into a single list."""
    flat = []
    for sublist in value:
        flat.extend(sublist)
    return flat
def cant_infer(data):
    """ Can't infer what data is """
    hophop = not data
    troptrop = bool(data)  # same as `True if data else False`
    toptop = data or True
    return hophop, troptrop, toptop
def _parse_face(face_row): """Parses a line in a PLY file which encodes a face of the mesh.""" face = [int(index) for index in face_row.strip().split()] # Assert that number of vertices in a face is 3, i.e. it is a triangle if len(face) != 4 or face[0] != 3: raise ValueError( 'Only supports face representation as a string with 4 numbers.') return face[1:]
def get_pipeline_definition(pipeline_name, working_dir):
    """Return inputs as a mock pipeline loader stub."""
    return dict(pipeline_name=pipeline_name, working_dir=working_dir)
def calc_integration_time(num_groups, frame_time, frames_per_group, num_skips):
    """Calculate the integration time.

    Parameters
    ----------
    num_groups : int
        Groups per integration.
    frame_time : float
        Frame time (in seconds)
    frames_per_group : int
        Frames per group -- always 1 except maybe brown dwarves
    num_skips : int
        Skips per integration -- always 0 except maybe brown dwarves

    Returns
    -------
    integration_time : float
        Integration time (in seconds)
    """
    # the trailing skip of the last group is not part of the integration
    total_frames = num_groups * (frames_per_group + num_skips) - num_skips
    return total_frames * frame_time
def remove_tag(value, arg):
    """Remove a tag from the selected-tags string.

    If the string holds more than one tag (contains "/"), also strip the "/"
    joining *arg* to its neighbour — checking whether it is a follow-on tag
    before a leading one.  (The original implicitly returned None for a
    single-tag string.)
    """
    if "/" not in value:
        # single tag: just drop it
        return value.replace(arg, '')
    if ("/" + arg) in value:
        return value.replace(("/" + arg), '')
    return value.replace((arg + "/"), '')
def prepare_check(data):
    """Prepare check for catalog endpoint

    Parameters:
        data (Object or ObjectID): Check ID or check definition
    Returns:
        Tuple[str, dict]: where first is ID and second is check definition
    """
    if not data:
        return None, {}
    if isinstance(data, str):
        # a bare string is just a check id
        return data, {}
    check = {}
    if "ID" in data:
        check["CheckID"] = data["ID"]
    for field in ("Node", "CheckID", "Name", "Notes", "Status", "ServiceID"):
        if field in data:
            check[field] = data[field]
    # a definition carrying nothing but an id collapses to the id form
    if list(check) == ["CheckID"]:
        return check["CheckID"], {}
    return check.get("CheckID"), check
def multiply_values(dictionary: dict, num: int) -> dict:
    """Multiply each value in *dictionary* by *num*.

    Args:
        dictionary (dict): subject dictionary (None yields an empty dict)
        num (int): multiplier

    Returns:
        dict: mapping of keys to values multiplied by the multiplier
    """
    if dictionary is None:
        return {}
    return {key: value * num for key, value in dictionary.items()}
def utcstr(ts):
    """Format a UTC timestamp in ISO 8601 format with millisecond precision.

    :param ts: The timestamp to format.
    :type ts: instance of :py:class:`datetime.datetime`

    :returns: Timestamp formatted in ISO 8601 format (or *ts* itself when
        it is falsy, e.g. None).
    :rtype: unicode
    """
    if not ts:
        return ts
    # truncate microseconds to milliseconds, then mark as UTC with 'Z'
    return ts.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
def selection_sort(A, show_progress=False):
    """Sort list *A* in place with selection sort and return it.

    The algorithm maintains two subarrays: the sorted prefix and the
    unsorted remainder.  Each pass moves the minimum of the unsorted part
    to the front of it.  When *show_progress* is True, the list is printed
    before each pass and once after sorting.
    """
    n = len(A)
    for boundary in range(n - 1):
        if show_progress:
            print(A)
        # locate the smallest element in the unsorted tail
        smallest = boundary
        for candidate in range(boundary + 1, n):
            if A[candidate] < A[smallest]:
                smallest = candidate
        A[smallest], A[boundary] = A[boundary], A[smallest]
    if show_progress:
        print(A)
    return A
def prep_thename(softwareversion, appendbars=False, temp=False):
    """Generate the name for an output file.

    :param softwareversion: Software release version.
    :type softwareversion: str

    :param appendbars: Whether to add app bars to file. Default is false.
    :type appendbars: bool

    :param temp: If the file we write to is temporary. Default is false.
    :type temp: bool
    """
    if temp:
        # a temporary file always overrides the versioned name
        return "TEMPFILE"
    return softwareversion + ("plusapps" if appendbars else "")
def gcd(a, b):
    """Extended Euclid: return (x, y, d) with x*a + y*b == d == gcd(a, b)."""
    if b:
        x, y, d = gcd(b, a % b)
        # back-substitute the Bezout coefficients one level up
        return y, x - y * (a // b), d
    return 1, 0, a
def _strings_exists(*strings: str) -> bool: """Check that all of the strings exist and none of them are just the str 'None'.""" for s in strings: if s in ('', 'None'): return False return True
def rook_attack_id(x, y, board, king_x, king_y): """ Function for determining fields that can be blockaded to stop the check of the king for a rook """ indices = [] if king_x == x and king_y < y: dx = 0 dy = -1 elif king_x == x and king_y > y: dx = 0 dy = 1 elif king_x < x and king_y == y: dx = -1 dy = 0 elif king_x > x and king_y == y: dx = 1 dy = 0 else: return indices curr_x = x + dx curr_y = y + dy while 0 <= curr_x < 8 and 0 <= curr_y < 8 and board[curr_x][curr_y] is None: indices.append((curr_x, curr_y)) curr_x += dx curr_y += dy return indices
def to_pixel_format(contour):
    """OpenCV contours wrap each point as [[col, row]]; convert them to
    plain (row, col) tuples."""
    pixels = []
    for wrapped in contour:
        point = wrapped[0]
        pixels.append((point[1], point[0]))
    return pixels
def divide(string, length):
    """
    Taken (with permission) from https://github.com/TheElementalOfCreation/creatorUtils

    Divides a string into substrings of equal *length*; any remainder
    shorter than *length* is dropped.

    :param string: string to be divided.
    :param length: length of each division.
    :returns: list containing the divided strings.

    Example:
        >>>> a = divide('Hello World!', 2)
        >>>> print(a)
        ['He', 'll', 'o ', 'Wo', 'rl', 'd!']
    """
    pieces = len(string) // length
    return [string[i * length:(i + 1) * length] for i in range(pieces)]
def ids_to_title_artist(dali_dataset):
    """Build a table [[id, artist, title], ...] from a DALI dataset,
    prefixed with a header row."""
    rows = [['id', 'artist', 'title']]
    for entry in dali_dataset.values():
        info = entry.info
        rows.append([info['id'], info['artist'], info['title']])
    return rows
def sort_selective (array):
    """
    Selection-sort the given list in place and return it.

    @type array: list
    @param array: The list how need to be sorted
    @rtype: list
    @return: Return the sorted list
    @raise TypeError: when *array* is not a list
    """
    if (type(array) is not list):
        raise TypeError('Type of array must be \'list\' and not ' + str(type(array)))
    length = len(array)
    for front in range(length - 1):
        # find the smallest remaining element
        smallest = front
        for pos in range(front + 1, length):
            if (array[pos] < array[smallest]):
                smallest = pos
        if (smallest != front):
            array[front], array[smallest] = array[smallest], array[front]
    return array
def unique (inlist):
    """
    Returns all unique items in the passed list, preserving first-seen
    order.  If a list-of-lists is passed, unique LISTS are found (i.e.,
    items in the first dimension are compared).

    Usage:   unique (inlist)
    Returns: the unique elements (or rows) in inlist
    """
    # Membership testing against a list (not a set) is deliberate:
    # rows may be unhashable lists.
    seen = []
    for element in inlist:
        if element in seen:
            continue
        seen.append(element)
    return seen
def smart_truncate(string, max_length=0, word_boundaries=False, separator=' '):
    """
    Truncate ``string`` to at most ``max_length`` characters.

    With ``word_boundaries`` set, only whole words (as delimited by
    ``separator``) are kept; words that would overflow the budget are
    skipped rather than cut.  A ``max_length`` of 0 disables truncation.
    """
    string = string.strip(separator)

    # No budget, or the string already fits: nothing to do.
    if not max_length or len(string) < max_length:
        return string

    # Plain character truncation when word boundaries don't matter.
    if not word_boundaries:
        return string[:max_length].strip(separator)

    # A single word longer than the budget: hard cut.
    if separator not in string:
        return string[:max_length]

    kept = ''
    for word in string.split(separator):
        if not word:
            continue
        # The + len(separator) accounts for the separator appended after
        # the word; it is stripped again before returning.
        if len(kept) + len(word) + len(separator) <= max_length:
            kept += '{0}{1}'.format(word, separator)
    if not kept:
        # Not even the first word fit: fall back to a hard cut.
        kept = string[:max_length]
    return kept.strip(separator)
def _get_term_value(term): """ Auxiliary function: gets str or value from list of value or {key: value} """ if isinstance(term, dict): [(k, v)] = term.items() return v else: return term
def format_dict_counter(dict_counter):
    """Turn a {budget_type: amount} counter into a list of dicts ranked
    by amount, largest first."""
    ranked = sorted(dict_counter.items(), key=lambda kv: kv[1], reverse=True)
    return [{"budget_type": budget_type, "amount": amount}
            for budget_type, amount in ranked]
def _normalized_import_cycle(cycle_as_list, sort_candidates): """Given an import cycle specified as a list, return a normalized form. You represent a cycle as a list like so: [A, B, C, A]. This is equivalent to [B, C, A, B]: they're both the same cycle. But they don't look the same to python `==`. So we normalize this list to a data structure where different representations of the cycle *are* equal. We do this by rearranging the cycle so that a canonical node comes first. We pick the node to be the node in cycle_as_list that is also in sort_candidates. If there are multiple such nodes, we take the one that's first alphabetically. We assume a simple cycle (that is, one where each node has only one incoming edge and one outgoing edge), which means that each node only occurs here once, so the sort order is uniquely defined. """ sort_elts = [node for node in cycle_as_list if node in sort_candidates] if not sort_elts: # probably impossible, but best to be safe sort_elts = cycle_as_list min_index = cycle_as_list.index(min(sort_elts)) # The weird "-1" here is because A occurs twice in the input # cycle_as_list, but we want min_elt to occur twice in the output. return tuple(cycle_as_list[min_index:-1] + cycle_as_list[:min_index + 1])
def get_maxlevel(divs, maxlevel):
    """
    Returns the maximum div level, recursing into any nested 'subdivs'.
    """
    for info in divs:
        maxlevel = max(maxlevel, info['level'])
        subdivs = info.get('subdivs', None)
        if subdivs:
            maxlevel = get_maxlevel(subdivs, maxlevel)
    return maxlevel
def are_same(string_window, pat_window):
    """
    Check whether two length-256 character-count arrays hold identical counts.

    :param string_window: per-char counts for the current text window
    :param pat_window: per-char counts for the pattern string
    :return: bool, True if all 256 counts match, else False
    """
    return all(string_window[i] == pat_window[i] for i in range(256))
def bboxes_overlap(bbox_1, bbox_2):
    """Determines if two bounding boxes overlap or coincide

    Parameters
    ----------
    bbox_1 : 4-tuple
        1st bounding box, convention: (minX, minY, maxX, maxY)
    bbox_2 : 4-tuple
        2nd bounding box, convention: (minX, minY, maxX, maxY)

    Returns
    -------
    overlap : bool
        True if the bounding boxes overlap / coincide, False otherwise
    """
    min_x1, min_y1, max_x1, max_y1 = bbox_1
    min_x2, min_y2, max_x2, max_y2 = bbox_2
    # Boxes overlap iff their projections overlap on both the x and y axes;
    # two intervals overlap iff each one's max reaches the other's min.
    x_overlap = max_x1 >= min_x2 and min_x1 <= max_x2
    y_overlap = max_y1 >= min_y2 and min_y1 <= max_y2
    return x_overlap and y_overlap
def chk_pfx( msg, p ):
    """check if msg content begins with the given prefix; slicing (rather
    than str.startswith) keeps this working for any sliceable sequence"""
    prefix_len = len(p)
    return msg[:prefix_len] == p
def format_perc(x, dec_places=1):
    """ format_perc

    x: a number (fraction) to format as a percentage
    dec_places: an integer for the number of digits past the decimal
    """
    # Thousands separators plus the requested precision, e.g. 0.5 -> '50.0%'.
    return f"{float(100 * x):,.{dec_places}f}%"
def parse_repeat(unparsed_rep):
    """Parse a repeat returning it as a single item or a list as appropriate."""
    if ',' in unparsed_rep:
        # Comma-separated: a list of repeats.
        return unparsed_rep.split(',')
    if unparsed_rep.isnumeric():
        # A single numeric repeat count.
        return int(unparsed_rep)
    # Anything else passes through unchanged.
    return unparsed_rep
def _get_valid_number(section, option, provided): """Validates an int type configuration option. Returns None by default if the option is unset. """ if provided is None: return None try: return int(provided) except ValueError: error = "Value provided for '{0}: {1}' is not a valid number".format( section, option) raise ValueError(error)
def getBestCols(Matrix, numCols): #expects matrix of floats, except header #no row names
    """
    given a matrix of estimated proportions for each sample, returns the
    indices of the numCols columns with the highest column totals
    (highest first; ties broken by lower index).

    Fixes over the previous version: a leftover outer loop recomputed the
    ranking once per matrix row while destructively setting each picked
    total to -1, so the list finally returned came from the *last* pass and
    pointed at low-ranked (or bogus) columns; the unused outMat/outLine
    accumulators are gone as well.
    """
    num_columns = len(Matrix[1])
    totals = [0.0] * num_columns
    # Sum each column, skipping the header row.
    for line in Matrix[1:]:
        for i in range(num_columns):
            totals[i] += float(line[i])
    # Rank column indices by total, descending; sorted() is stable, so
    # equal totals keep the lower index first.
    ranked = sorted(range(num_columns), key=lambda i: totals[i], reverse=True)
    return ranked[:numCols]
def sql_comment(comment: str) -> str:
    """
    Transforms a single- or multi-line string into an ANSI SQL comment,
    prefixed by ``--`` (using -- as a comment marker is ANSI SQL).
    """
    if not comment:
        return ""
    prefixed = [f"-- {line}" for line in comment.splitlines()]
    return "\n".join(prefixed)
def bin_to_gray(bin_list):
    """
    Convert from binary coding to gray coding.

    We assume big endian encoding: the first gray bit equals the first
    binary bit, and each subsequent gray bit is the XOR of the two
    adjacent binary bits, g[i] = b[i-1] ^ b[i].

    Examples
    ========

    >>> bin_to_gray('111')
    '100'
    >>> bin_to_gray('10')
    '11'

    See Also
    ========

    gray_to_bin
    """
    # Bug fix: the previous code XORed each binary bit with an already
    # computed *gray* bit (b[i - 1]) instead of the next *binary* bit,
    # which only coincidentally gave the right answer for inputs whose
    # bits are all equal (e.g. '111'); '10' wrongly came back as '10'.
    b = [bin_list[0]]
    for i in range(len(bin_list) - 1):
        b += str(int(bin_list[i]) ^ int(bin_list[i + 1]))
    return ''.join(b)
def get_value(pairs, key):
    """Returns the value for the given key in the given pairs.

    Args:
        pairs: A list of {"key": key, "value": value} dicts.
        key: A key whose value to get. If the key appears more than once,
            only the first value is returned.

    Returns:
        The value for the given key, or [] when the key is absent.
    """
    matches = (pair['value'] for pair in pairs if pair['key'] == key)
    return next(matches, [])
def npv_conf(conf):
    """compute negative predictive value TN / (TN + FN) from a
    (TN, FP, FN, TP) confusion tuple; returns 0 when undefined"""
    TN, FP, FN, TP = conf
    denominator = TN + FN
    if denominator == 0:
        return 0
    return TN / float(denominator)
def get_heading_indices(row: list) -> dict:
    """generates a dictionary mapping desired headings to row indices to
    allow for changing order of columns in source data

    Args:
        row (list): header row of data from CSV file

    Returns:
        dict: dictionary of heading matched with row index
    """
    desired_headings = (
        "Date Dispensed",
        "Patient Name",
        "Street",
        "Town or City",
        "Birth Date",
        "PPSN No",
        "Gender",
        "Qty",
        "Script Dispensed As",
        "Directions Expanded",
        "Contract GP Name",
        "Contract GP Address",
    )
    return {heading: row.index(heading) for heading in desired_headings}
def parse_cmd_out(cmd_out):
    """Get true or false: True iff some line of the output is exactly 'True'."""
    return any(line == "True" for line in cmd_out.split("\n"))
def two_sum(a, b):
    """
    Add ``a`` and ``b`` exactly, returning the result as two float64s.
    The first is the approximate sum (with some floating point error)
    and the second is the error of the float64 sum.

    Using the procedure of Shewchuk, 1997,
    Discrete & Computational Geometry 18(3):305-363
    http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf

    Returns
    -------
    sum, err : float64
        Approximate sum of a + b and the exact floating point error

    Notes
    -----
    The statement order below is load-bearing: each subtraction recovers
    a rounding error and must not be algebraically simplified away.
    """
    approx = a + b
    # Recover the part of b lost when rounding (a + b).
    b_virtual = approx - a
    b_roundoff = b - b_virtual
    # Recover the part of a lost when rounding (a + b).
    a_virtual = approx - b
    a_roundoff = a - a_virtual
    return approx, a_roundoff + b_roundoff
def number_to_letter(n):
    """Returns a capital letter representing ordinal position.
    E.g., 1=A, 2=B, etc. Appends letters once you reach 26 in a way
    compatible with Excel/Google Sheets column naming conventions.
    27=AA, 28=AB...  Returns '' for 0 or None.
    """
    remaining = 0 if n is None else n
    letters = []
    # Bijective base-26: peel off the least-significant "digit" each pass.
    while remaining > 0:
        remaining, digit = divmod(remaining - 1, 26)
        letters.append(chr(ord('A') + digit))
    return ''.join(reversed(letters))
def my_autopct(x):
    """Function for autopct of plt.pie.  Percentages of 4 or below produce
    an empty label so 'too small' slices stay unannotated."""
    return f"{x:.2f} %" if x > 4 else ""
def get_historical_date(data_point):
    """Returns the YYYY-MM-DD date portion of DATA_POINT['begins_at'],
    or None when the field is missing or the input is malformed.

    :param data_point: mapping expected to hold an ISO-8601 timestamp
        string under 'begins_at' — presumably a robinhood-style data
        point; TODO confirm against callers.
    """
    try:
        return data_point['begins_at'][0:10]
    except (KeyError, TypeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.  KeyError: key absent;
        # TypeError: data_point or the timestamp is not subscriptable.
        return None
def convert_to_phrase(target_name):
    """
    Replace underscores in a target name with spaces.  We only eliminate
    _, not /, so "fish tank/bowl" still remains.
    """
    words = []
    for token in target_name.split(' '):
        words.extend(token.split('_'))
    return ' '.join(words)