content
stringlengths
42
6.51k
def object_info_as_dict(object_info):
    """Convert a KBase object_info 11-element list into a dictionary."""
    (objid, name, otype, save_date, ver, saved_by,
     wsid, ws_name, chsum, size, meta) = object_info
    return {
        "objid": objid,
        "name": name,
        "type": otype,
        "save_date": save_date,
        "ver": ver,
        "saved_by": saved_by,
        "wsid": wsid,
        "workspace": ws_name,
        "chsum": chsum,
        "size": size,
        "meta": meta,
    }
def str_space_to_list(string):
    """
    Convert a space-separated string into a list of its elements.

    Empty fragments produced by consecutive spaces are dropped.

    :param string: the string to convert.
    :return: the corresponding list of non-empty elements.
    """
    # Fixes from review: the original shadowed the builtin ``list`` and
    # carried a dead check (``str.split`` can never return the string '').
    return [element for element in string.split(" ") if element]
def maximum_value(tab):
    """
    Return the maximum positive value of ``tab`` and its index.

    :param tab: list of numeric values; at least one must be positive.
    :return: tuple ``(max_value, max_index)``; ``max_value`` is a float.
    :raises ValueError: if ``tab`` is not a list, or no positive value
        is found.
    """
    if not isinstance(tab, list):
        raise ValueError('Expected a list as input')
    max_value = 0.0
    max_index = -1
    for index, value in enumerate(tab):
        # Starting from 0.0, only strictly positive entries can win, which
        # preserves the original "no positive value" semantics.  (Cleanup:
        # the redundant ``>= 0`` test, the stray semicolon and the counter
        # that merely mirrored ``max_index == -1`` were removed.)
        if value > max_value:
            max_value = float(value)
            max_index = index
    if max_index == -1:
        raise ValueError('No positive value found')
    return max_value, max_index
def increment_array_and_count(tag_name, count, word):
    """
    Increment ``count[word]``, creating the entry at 1 if missing.

    :param tag_name: tag, passed through unchanged.
    :param count: dict mapping words to counts; mutated in place.
    :param word: key whose count is incremented.
    :return: tuple ``(tag_name, count)``.
    """
    # Fix: replaced a bare ``except`` (which hid real errors such as an
    # unhashable ``word``) with ``dict.get``.
    count[word] = count.get(word, 0) + 1
    return tag_name, count
def LoadCSV(filename):
    """Load CSV file.

    Loads a CSV (comma-separated value) file from disk and returns it as a
    list of rows where each row is a list of values (always strings).
    Values keep any trailing newline, matching the original behaviour.
    Returns an empty list when the file cannot be opened.
    """
    try:
        with open(filename) as f:
            # Idiom fix: iterate the file object directly instead of the
            # manual ``while 1: readline()`` loop.
            return [line.split(',') for line in f]
    except IOError:
        return []
def lowercase(text):
    """Return a lowercase copy of *text*."""
    return text.lower()
def groupms_byiconf(microstates, iconfs):
    """
    Split *microstates* into two groups by conformer membership.

    :param microstates: iterable of microstates; each exposes a ``state``
        collection of conformer indices.
    :param iconfs: conformer indices to look for.
    :return: tuple ``(ingroup, outgroup)`` — microstates whose state
        contains at least one of *iconfs*, and those containing none.
    """
    ingroup, outgroup = [], []
    for ms in microstates:
        if any(ic in ms.state for ic in iconfs):
            ingroup.append(ms)
        else:
            outgroup.append(ms)
    return ingroup, outgroup
def rectangle_clip(recta, rectb):
    """
    Return the intersection of ``recta`` and ``rectb`` as ``(x, y, w, h)``.

    Each rectangle is given as ``(x, y, width, height)``.  ``None`` is
    returned when the rectangles do not intersect.

    >>> rectangle_clip((0, 0, 20, 20), (10, 10, 20, 20))
    (10, 10, 10, 10)
    """
    ax, ay, aw, ah = recta
    bx, by, bw, bh = rectb
    left = max(ax, bx)
    bottom = max(ay, by)
    right = min(ax + aw, bx + bw)
    top = min(ay + ah, by + bh)
    if right < left or top < bottom:
        return None
    return (left, bottom, right - left, top - bottom)
def determineDocument(pdf):
    """Scan the pdf text for marker words and classify the traded
    investment vehicle as 'certificate', 'warrant' or 'stock'."""
    if any(marker in pdf for marker in ('turbop', 'turboc', 'minil')):
        return 'certificate'
    if 'call' in pdf or 'put' in pdf:
        return 'warrant'
    return 'stock'
def fib_table_for(n):
    """
    Returns: fibonacci list [a0, a1, ..., an]

    Parameter n: the position in the fibonacci sequence
    Precondition: n >= 0 is an int
    """
    if n == 0:
        return [1]
    fib = [1, 1]
    # Bug fix: the loop previously ran ``range(2, n)`` and so returned a
    # list of length n instead of the documented n + 1 entries.
    for _ in range(2, n + 1):
        fib.append(fib[-1] + fib[-2])
    return fib
def remove_args(route_title):
    """Strip a trailing parenthesised argument list from *route_title* for
    display in console | str --> str"""
    print('Running remove_args')
    paren_at = route_title.find('(')
    return route_title if paren_at == -1 else route_title[:paren_at]
def nested_get(dictionary: dict, keys: list):
    """Get the value stored under a chain of nested keys.

    Walks *dictionary* through every key in *keys* except the last, then
    looks the final key up with ``dict.get`` (returning ``None`` if absent).

    >>> nested_get({'key': {'nested_key': 123}}, keys=['key', 'nested_key'])
    123
    """
    # Doc fix: the original docstring said "Set value to dict", but this
    # function only reads.
    node = dictionary
    for key in keys[:-1]:
        node = node[key]
    return node.get(keys[-1])
def humanize(memory, suffix="B", kilo=1024):
    """
    Scale memory to its proper format e.g:

        1253656 => '1.20 MiB'
        1253656678 => '1.17 GiB'

    :param memory: quantity to format.
    :param suffix: unit suffix appended after the scale prefix.
    :param kilo: scaling factor, 1000 (SI) or 1024 (binary).
    :raises ValueError: if kilo is neither 1000 nor 1024.
    """
    if kilo == 1000:
        units = ["", "k", "M", "G", "T", "P"]
    elif kilo == 1024:
        units = ["", "Ki", "Mi", "Gi", "Ti", "Pi"]
    else:
        raise ValueError("kilo must be 1000 or 1024!")
    for unit in units:
        if memory < kilo:
            return f"{memory:.2f} {unit}{suffix}"
        memory /= kilo
    # Bug fix: values of a peta-unit or more previously fell off the end of
    # the loop and implicitly returned None; clamp to the largest unit.
    return f"{memory * kilo:.2f} {units[-1]}{suffix}"
def is_minimum_metric_set(app, expected, collected):
    """
    Return True when every expected metric appears in *collected*.

    Any missing metrics are logged through ``app["logger"]``.
    """
    missing = set(expected) - set(collected)
    if missing:
        app["logger"].warn(
            "Expected metrics not found: {}".format(",".join(missing)))
    return not missing
def _mangle_attr(name): """ Mangle attributes. The resulting name does not startswith an underscore '_'. """ return 'm_' + name
def matrixmult(A, B):
    """Matrix multiplication function

    This function returns the product of a matrix multiplication given two
    matrices.  Let A be m x n and B be p x q; the multiplication is only
    possible if n = p, producing an m x q matrix.

    Parameters
    ----------
    A : list
        First matrix, in a 2D array format.
    B : list
        Second matrix, in a 2D array format.

    Returns
    -------
    C : list
        The product of the matrix multiplication.

    Examples
    --------
    >>> A = [[11,12,13],[14,15,16]]
    >>> B = [[1,2],[3,4],[5,6]]
    >>> matrixmult(A, B)
    [[112, 148], [139, 184]]
    """
    rows, inner, cols = len(A), len(B), len(B[0])
    # Bug fix: the result matrix was previously allocated with its
    # dimensions swapped (len(B[0]) rows x len(A) columns), which crashed
    # with IndexError (or silently misbehaved) for any non-square product.
    C = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        for j in range(cols):
            for k in range(inner):
                C[i][j] += A[i][k] * B[k][j]
    return C
def return_timings(data, trial_type):
    """
    Find all trials whose name field (index 0) contains the string
    *trial_type* and return their onsets in seconds (index 5) as a list.
    """
    return [trial[5] for trial in data if trial_type in trial[0]]
def clear_bit(int_type, offset):
    """Return an integer with the bit at 'offset' cleared."""
    return int_type & ~(1 << offset)
def _next_pow_two(n): """Returns the next power of two greater than or equal to `n`""" i = 1 while i < n: i = i << 1 return i
def _fill_mi_header(row, control_row):
    """
    Forward fill blank entries in row but only inside the same parent index.

    Used for creating headers in Multiindex.

    Parameters
    ----------
    row : list
        List of items in a single row.
    control_row : list of bool
        Helps to determine if particular column is in same parent index as
        the previous value. Used to stop propagation of empty cells between
        different indexes.

    Returns
    -------
    Returns changed row and control_row
    """
    last = row[0]
    for i in range(1, len(row)):
        # A False control flag marks a boundary between parent indexes:
        # restart forward-filling from this column's own value.
        if not control_row[i]:
            last = row[i]
        if row[i] == "" or row[i] is None:
            # Blank cell: fill with the last value seen within this parent
            # index.
            row[i] = last
        else:
            # Non-blank cell: stop propagation across this column on later
            # calls and remember the new fill value.
            control_row[i] = False
            last = row[i]
    return row, control_row
def reverse_complement(number, length):
    """
    Calculate the reverse complement of a DNA sequence in a binary
    representation (two bits per base).

    :arg int number: Binary representation of a DNA sequence.
    :arg int length: Number of bases in the sequence.

    :return: Binary representation of the reverse complement of the
        sequence corresponding to `number`.
    :rtype: int
    """
    complemented = ~number
    result = 0
    for _ in range(length):
        # Shift the next complemented base pair onto the result, which
        # reverses the base order at the same time.
        result = (result << 2) | (complemented & 0x03)
        complemented >>= 2
    return result
def ucb_air_criterion(num_arms, round_num, beta, max_attainable=False):
    """
    Decide whether to add a new arm, using the Arm Introducing Rule (AIR)
    from Wang et al. 2008, "Algorithms for infinitely many armed bandits".

    Parameters:
    -----------
    num_arms: integer
        The current number of arms
    round_num: integer
        The current round number
    beta: numeric
        The problem hardness factor.  Must be positive.  Smaller
        corresponds to easier problems where it is more likely to sample a
        nearly optimal arm.
    max_attainable: boolean (default False)
        Whether the maximum reward is attainable, and correspondingly,
        whether arms with higher means will tend to have less variance.

    Returns:
    --------
    new_arm: boolean
        Whether or not a new arm should be added
    """
    # Always keep at least two arms available.
    if num_arms < 2:
        return True
    if max_attainable or beta >= 1.0:
        exponent = beta / (beta + 1)
    else:
        exponent = 0.5 * beta
    return num_arms < round_num ** exponent
def vectorize(values):
    """
    Join a list of values into a single comma-separated string.

    Non-list inputs are returned unchanged.
    """
    if not isinstance(values, list):
        return values
    return ','.join(map(str, values))
def calculate_handlen(hand):
    """
    Returns the length (number of letters) in the current hand.

    hand: dictionary (string-> int)
    returns: integer
    """
    # Bug fix: ``len(hand)`` counted *distinct* letters, not total letters;
    # a hand {'a': 2, 'b': 1} holds 3 letters, not 2, per the docstring.
    return sum(hand.values())
def build_ui_field(item):
    """
    Build the description of a mongoengine field from its YAML description.

    An example of item is given below:
        {'title': {'required': True, 'type': 'text', 'label': 'Title'}}

    :param item: dict mapping a field name to its option dict.
    :return: string in the following format, paired with the field name:
        title = StringField(required=True)
    """
    for keys, values in item.items():
        field_entry = keys
        field_type = ""
        required = ""
        default_value = ""
        if isinstance(values, dict):
            for key, value in values.items():
                if key in ['required', 'type']:
                    if key == 'required':
                        required = key + "=" + str(value)
                    elif key == 'type':
                        if value in ['text', 'textarea', 'dropdown']:
                            field_type = "StringField"
                        # NOTE(review): the next two branches are dead code -
                        # 'textarea' and 'dropdown' are already captured by
                        # the membership test above, so both always map to
                        # StringField rather than TextAreaField/SelectField.
                        elif value == 'textarea':
                            field_type = "TextAreaField"
                        elif value == 'dropdown':
                            field_type = "SelectField"
                        elif value == 'checkbox':
                            field_type = "StringField"
                            default_value = "no"
                        elif value == 'email':
                            field_type = "EmailField"
        if default_value and required:
            field_entry = '{0} = {1}({2}, default={3})' \
                .format(field_entry, field_type, required, default_value)
        elif default_value and not required:
            field_entry = '{0} = {1}(default={2})' \
                .format(field_entry, field_type, default_value)
        else:
            field_entry = '{0} = {1}({2})' \
                .format(field_entry, field_type, required)
        # NOTE(review): returns inside the loop, so only the first entry of
        # ``item`` is ever processed.
        return field_entry, keys
def detect_process_by_name(proc_name, exec_path, port):
    """Checks if a process of the given name runs with the given executable
    path and port.

    Args:
        proc_name -- process name,
        exec_path -- path to executable,
        port -- process port.

    Returns:
        True if at least one matching ``ps`` line is found, else False.
    """
    from os import popen

    # SECURITY(review): ``proc_name`` is interpolated into a shell command
    # line; only call this with trusted input.
    pids = []
    for line in popen("ps ax | grep " + proc_name + " | grep -v grep"):
        if exec_path in line and str(port) in line:
            pids.append(line.strip().split()[0])
    # Idiom fix: collapsed ``if pids: return True / return False``.
    return bool(pids)
def remove_duplicates(l: list):
    """
    Remove duplicates from a list whose elements implement __eq__ but not
    __hash__, where ``list(set(l))`` would fail.  For each duplicated
    element the *last* occurrence is the one kept.
    """
    deduped = []
    for position, candidate in enumerate(l):
        if candidate not in l[position + 1:]:
            deduped.append(candidate)
    return deduped
def count_pos(L):
    """
    (list) -> bool

    Return True iff every element of L is strictly positive (an empty list
    returns True).

    Restriction: L must be a list with numbers in it
    """
    # Idiom fix: the manual loop (whose docstring misleadingly claimed to
    # "count negatives") is exactly ``all`` with a short-circuit.
    return all(i > 0 for i in L)
def _standardize_keys(zipcounty): """Standardize and keep relevent fields (zip and county).""" standard_area = {} for k, v in zipcounty.items(): if k.replace(' ', '').lower() in ['zip', 'zipcode', 'zip code']: standard_area['zipCode'] = v if k.replace(' ', '').lower() in ['county', 'countyname', 'county name']: standard_area['countyName'] = v return standard_area
def time(seconds):
    """
    Format time as a string.

    Parameters:
        seconds (float): time in seconds
    """
    minute = 60
    hour = 60 * 60
    day = 24 * 60 * 60
    if seconds > day:
        return "%.2f days" % (seconds / day)
    if seconds > hour:
        return "%.2f hours" % (seconds / hour)
    if seconds > minute:
        return "%.2f mins" % (seconds / minute)
    return "%.2f secs" % seconds
def convert_to_map(list_str):
    """
    Convert a list of strings (['key', 'value', 'key', 'value', ...])
    into {key: value}

    Parameters
    ----------
    list_str : list, type of element is String
        list of strings in the format of
        ['key', 'value', 'key', 'value', ...]

    Returns
    -------
    key_value : dict
        {key: value}.  On an odd-length input the complete pairs are still
        returned and a warning is printed (matching the original behaviour).
    """
    # Fix: the original detected the odd-length case through a bare
    # ``except`` around an index error; test the length explicitly instead.
    key_value = dict(zip(list_str[0::2], list_str[1::2]))
    if len(list_str) % 2 != 0:
        print("Invalid number of arguments, which should be even.")
    return key_value
def num_bytes_to_str(num_bytes):
    """Return a number of bytes as a human-readable string (B/KB/MB/GB/TB)."""
    size = num_bytes
    for prefix in ("B", "KB", "MB", "GB"):
        if size < 1024:
            return "{:.1f} {}".format(size, prefix)
        size /= 1024
    return "{:.1f} TB".format(size)
def get_commands_to_config_vrf(delta, vrf):
    """Gets commands to configure a VRF

    Args:
        delta (set): params to be config'd- created in nxos_vrf
        vrf (str): vrf name

    Returns:
        list: ordered list to config a vrf

    Note:
        Specific for Ansible module(s). Not to be called otherwise.
    """
    commands = []
    for param, value in delta:
        if param == 'description':
            commands.append('description ' + value)
        elif param == 'admin_state':
            state = value.lower()
            if state == 'up':
                commands.append('no shutdown')
            elif state == 'down':
                commands.append('shutdown')
    # Prefix the context command only when something needs configuring.
    if commands:
        commands.insert(0, 'vrf context ' + vrf)
    return commands
def curie_to_str(prefix: str, identifier: str) -> str:
    """Combine a prefix and identifier into a CURIE string."""
    return ":".join((prefix, identifier))
def median(nums):
    """
    calculates the median of a list of numbers

    >>> median([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4])
    2.5
    >>> median([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4.1, 1000000])
    3
    >>> median([])
    Traceback (most recent call last):
        ...
    ValueError: Need a non-empty iterable
    """
    ordered = sorted(nums)
    count = len(ordered)
    if count == 0:
        raise ValueError("Need a non-empty iterable")
    middle = count // 2
    if count % 2:
        # Odd length: the single middle element.
        return ordered[middle]
    # Even length: mean of the two middle elements.
    return (ordered[middle - 1] + ordered[middle]) / 2.0
def integer(number, *args, **kwargs):
    """Improved int function that can return the correct value of a binary
    number in two's complement.

    :param number: string of digits (binary for two's-complement use).
    :param args: extra positional arguments forwarded to ``int`` (e.g. base).
    :param kwargs: pass ``signed=True`` to interpret a leading '1' digit as
        a negative two's-complement value.
    """
    # Fix: look the flag up with ``.get`` - the original indexed
    # ``kwargs['signed']`` and raised KeyError whenever any *other*
    # keyword was supplied without it.
    if kwargs.get('signed') and number[0] == '1':
        # Two's complement: subtract 2**n from the unsigned value
        # (equivalent to the original double-negation arithmetic).
        return int(number, *args) - (1 << len(number))
    return int(number, *args)
def normalize(data_list):
    """
    Translate points to the origin and scale by the larger coordinate range.

    :param data_list: [(x0, y0), (x1, y1), ..., (xn-1, yn-1)]
    :return: list of points shifted by (-x_min, -y_min) and divided by
        max(x range, y range).
    """
    xs = [point[0] for point in data_list]
    ys = [point[1] for point in data_list]
    x_min, y_min = min(xs), min(ys)
    scale = max(max(xs) - x_min, max(ys) - y_min)
    return [((x - x_min) / scale, (y - y_min) / scale)
            for x, y in zip(xs, ys)]
def count_mutations(tree, ntaxa):
    """
    Takes pairs of taxa/nodes and alleles, and returns the number of
    mutations that happened along the tree.

    Pairs must be ordered in same way as seq-gen output.

    :param tree: sequence of "label allele" strings, one per taxon/node.
    :param ntaxa: number of tip taxa; internal-node labels start at
        ntaxa + 1 (the root).
    :return: total number of mutations counted along the tree.
    """
    # node/taxon IDs
    labels = [int(tree[i].split()[0]) for i in range(len(tree))]
    # corresponding alleles
    alleles = [tree[i].split()[1] for i in range(len(tree))]
    # node label for root of the whole tree
    root = ntaxa + 1
    # to keep track of which comparisons have already been made
    comparisons = []
    # to keep track of which taxon we are currently following
    current_taxon = 1
    # mutation counter
    mutations = 0
    # NOTE(review): negative indices (labels[i - 1] ... labels[i - 4]) wrap
    # around to the end of the list when i is small; presumably the seq-gen
    # ordering makes those wrapped comparisons fail the guards - confirm
    # against real seq-gen output.
    while current_taxon <= ntaxa:
        for i in range(len(labels)):
            # if the current label is an ancestral node other than the
            # root, and the previous label is an ancestral node
            if (
                labels[i] > root
                and labels[i - 1] >= root
                and labels[i] == labels[i - 1] + 1
            ):
                # if this comparison between nodes has not already been made
                if [labels[i], labels[i - 1]] not in comparisons:
                    if alleles[i] != alleles[i - 1]:  # if there was a mutation
                        mutations += 1
                        comparisons.append([labels[i], labels[i - 1]])
                    else:
                        comparisons.append([labels[i], labels[i - 1]])
            # if the current label is an ancestral node, and the previous
            # ancestral node has 1 taxon descending from it
            elif (
                labels[i] > root
                and labels[i - 2] >= root
                and labels[i] == labels[i - 2] + 1
            ):
                # if this comparison between nodes has not already been made
                if [labels[i], labels[i - 2]] not in comparisons:
                    if alleles[i] != alleles[i - 2]:  # if there was a mutation
                        mutations += 1
                        comparisons.append([labels[i], labels[i - 2]])
                    else:
                        comparisons.append([labels[i], labels[i - 2]])
            # if the current label is an ancestral node, and the previous
            # ancestral node has 2 taxa descending from it
            elif (
                labels[i] > root
                and labels[i - 3] >= root
                and labels[i] == labels[i - 3] + 1
            ):
                # if this comparison between nodes has not already been made
                if [labels[i], labels[i - 3]] not in comparisons:
                    if alleles[i] != alleles[i - 3]:  # if there was a mutation
                        mutations += 1
                        comparisons.append([labels[i], labels[i - 3]])
                    else:
                        comparisons.append([labels[i], labels[i - 3]])
            # if the current label is an ancestral node, and a clade with a
            # non-subtending ancestral node is listed before it
            elif (
                labels[i] > root
                and labels[i - 4] >= root
                and labels[i] == labels[i - 4] + 2
            ):
                # if this comparison between nodes has not already been made
                if [labels[i], labels[i - 4]] not in comparisons:
                    if alleles[i] != alleles[i - 4]:  # if there was a mutation
                        mutations += 1
                        comparisons.append([labels[i], labels[i - 4]])
                    else:
                        comparisons.append([labels[i], labels[i - 4]])
            # if the label is the current tip taxon
            elif labels[i] == current_taxon:
                if labels[i - 1] >= root:  # if i - 1 is the subtending node
                    if alleles[i] != alleles[i - 1]:  # if there was a mutation
                        mutations += 1
                elif labels[i - 2] >= root:  # if i - 2 is the subtending node
                    if alleles[i] != alleles[i - 2]:  # if there was a mutation
                        mutations += 1
        current_taxon += 1  # update current taxon
    return mutations
def find_smp_rt(rt1, rt2):
    """
    determine the sample rates - highest and lowest

    :param rt1: list of the first file
    :param rt2: list of the second file
    :return: a tuple (lowest, highest) of the two sample rates
    """
    low, high = sorted((rt1, rt2))
    return (low, high)
def _patsplit(pattern, default): """Split a string into the optional pattern kind prefix and the actual pattern.""" if ':' in pattern: kind, pat = pattern.split(':', 1) if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre', 'listfile', 'listfile0', 'set'): return kind, pat return default, pattern
def remove_comments(line):
    """Remove comments (#) from FIELD, CONFIG, CONTROL lines.

    Also compresses any multiple white spaces to a single space.
    """
    code, _sep, _comment = line.partition("#")
    return " ".join(code.split()).strip()
def stagger_tuple(elements_list, initial=None):
    """
    Convert a list of objects into a list of staggered (overlapping) pairs.

    Example: [1, 2, 3, 4, 5] -> [(1, 2), (2, 3), (3, 4), (4, 5)]

    :param elements_list: the sequence to pair up.
    :param initial: optional element paired with the first item; when None
        the pairing starts with the first two elements.
    :return: list of adjacent pairs.
    """
    # Bug fix: the original tested ``previous_element is not None`` on
    # every iteration, so a None *inside* the list silently dropped the
    # following pair.  A unique sentinel now only skips the first element
    # when no ``initial`` is supplied.
    sentinel = object()
    previous = sentinel if initial is None else initial
    pairs = []
    for element in elements_list:
        if previous is not sentinel:
            pairs.append((previous, element))
        previous = element
    return pairs
def best_match(long_seq, short_seq):
    """
    Slide *short_seq* along *long_seq* and return the window with the most
    positions whose characters match.

    :param long_seq: string, the DNA string to search within.
    :param short_seq: string, the DNA string to match.
    :return: ans: string, the best match string in the long_seq DNA string
        (the first window wins ties; '' if nothing matches at all).
    """
    window = len(short_seq)
    best = ''
    best_score = 0
    for start in range(len(long_seq) - window + 1):
        candidate = long_seq[start:start + window]
        score = sum(1 for a, b in zip(candidate, short_seq) if a == b)
        if score > best_score:
            best_score = score
            best = candidate
    return best
def process_singleton_ids(ids, entity):
    """Returns a single id if len(ids)==1.

    If len(ids)>1, returns ids (assumed to be a list) unchanged, or raises
    a ValueError if the shared functions expect a single id rather than a
    list for the specified entity.
    """
    if len(ids) == 1:
        return ids[0]
    # Also version id's?  It might be better to just let the shared
    # functions raise an exception rather than doing it for them - in
    # which case this function would be almost pointless...
    if entity == 'gbd_round':
        raise ValueError(
            f"Only single {entity} id's are allowed in shared functions.")
    return ids
def InvertDictionary(origin_dict):
    """Invert the key value mapping in the origin_dict.

    Given an origin_dict {'key1': {'val1', 'val2'}, 'key2': {'val1',
    'val3'}, 'key3': {'val3'}}, the returned inverted dict will be
    {'val1': {'key1', 'key2'}, 'val2': {'key1'}, 'val3': {'key2', 'key3'}}

    Args:
        origin_dict: A dict mapping each key to a group (collection) of
            values.

    Returns:
        An inverted dict mapping each value to the set of keys holding it.
    """
    inverted = {}
    for key, values in origin_dict.items():
        for value in values:
            if value not in inverted:
                inverted[value] = set()
            inverted[value].add(key)
    return inverted
def kebab_case_to_camel_case(s):
    """
    convert a kebab case string to a camel case string

    A utility function to help convert a kebab case string into a camel
    case string.  This is to help convert directive options typically
    defined in kebab case to Confluence macro parameters values which are
    typically required to be in camel case.

    Args:
        s: the string to convert

    Returns:
        the converted string
    """
    camel = ''.join(part.capitalize() for part in s.split('-'))
    # Robustness fix: inputs with no letters (e.g. '' or '-') previously
    # raised IndexError on the [0] access.
    if not camel:
        return camel
    return camel[0].lower() + camel[1:]
def walkable(grid_array: list) -> list:
    """Get a list of all the coordinates that are walkable (cell value 0).

    Args:
        grid_array (list): map of the terrain

    Returns:
        list: all (row, col) coordinates whose cell equals 0
    """
    return [(row_idx, col_idx)
            for row_idx, row in enumerate(grid_array)
            for col_idx, cell in enumerate(row)
            if cell == 0]
def cart_comp(x, y):
    """
    A basic comparison function to insure that layers are selected in
    terms of larger positive to larger negative (descending order):
    negative when x > y, positive when x < y, zero when equal.
    """
    difference = y - x
    return int(difference)
def is_commconnect_form(form):
    """
    Checks if something is a commconnect form, by manually inspecting the
    deviceID property
    """
    meta = form.get('form', {}).get('meta', {})
    return meta.get('deviceID', None) == 'commconnect'
def get_ls(num):
    """Get a list of the first *num* available line styles."""
    styles = [
        ':', '--', '-.', '-', ':', '--', '-.', '-', ':',
        ':', '--', '-.', '-', ':', '--', '-.', '-', ':',
    ]
    return styles[:num]
def __shapes_are_equal(shape1, shape2, named_dim_length):
    """
    compare the shape of two array. zeros in the second arguments are
    ignored

    Parameters
    ----------
    shape1 : tuple
        Concrete shape to check.
    shape2 : tuple
        Expected shape; entries may be ints (0 acts as a wildcard) or
        strings naming a dimension.  A name ending in ":" matches this and
        all following dimensions.
    named_dim_length : dict
        Mapping from dimension name to its bound length; updated in place
        the first time a name is seen, so repeated names must match.

    Returns
    -------
    bool
        True, if both shapes are identical
    """
    if len(shape1) != len(shape2):
        return False
    for i in range(len(shape2)):
        if type(shape2[i]) == str:
            # Named dimension.
            if shape2[i] in named_dim_length:
                # Name already bound: compare against the recorded length.
                shape2_entry = named_dim_length[shape2[i]]
            else:
                # First occurrence: bind the name.  A trailing ":" means
                # "this and all following dimensions have to match", so the
                # whole remaining shape tuple is stored.
                if shape2[i].endswith(":"):
                    named_dim_length[shape2[i]] = shape1[i:]
                else:
                    named_dim_length[shape2[i]] = shape1[i]
                # A newly bound name matches by definition.
                continue
        else:
            shape2_entry = shape2[i]
        if type(shape2_entry) == tuple:
            # Bound multi-dimension name: the rest of shape1 must match it.
            if shape1[i:] != shape2_entry:
                return False
        elif shape1[i] != shape2_entry and shape2_entry != 0:
            # 0 acts as a wildcard for a single dimension.
            return False
    return True
def convert_REC_ID_to_DD_ID(REC_ID):
    """
    Convert IDs as they are in RECCON ('tr_0', 'va_3', 'te_7') into
    original Daily Dialog IDs ('dialogue-N').
    """
    split, raw_id = REC_ID.split('_')
    base = int(raw_id) + 1
    if split == 'tr':
        return 'dialogue-' + str(base)
    if split == 'va':
        # Validation dialogues are offset past the training dialogues.
        return 'dialogue-' + str(base + 11118)
    assert split == 'te'
    # Test dialogues are offset past training + validation.
    return 'dialogue-' + str(base + 12118)
def assemble_config(aggregation_method, task2models):
    """Assemble the entire config for dumping to JSON.

    Args:
        aggregation_method (str): the aggregation method to use during
            ensemble prediction
        task2models (dict): mapping from task to the associated config_list
            of models

    Returns:
        (dict): dictionary representation of the ensemble config, ready
            for dumping to JSON
    """
    config = {
        'aggregation_method': aggregation_method,
        'task2models': task2models,
    }
    return config
def non_modal_batch_melt(Co, Do, F, P):
    """
    Calculate the concentration of a given trace element in a melt
    produced from non modal batch melting of a source rock, as described
    by Shaw (1970) equation 15.

    Inputs:
        Co = Concentration of trace element in the original solid
        Do = Bulk distribution coefficient for element when F = 0
        F = Fraction of original solid melted (fraction of melt)
        P = Bulk distribution coefficient of the melting mineral assemblage

    Returns:
        Cl = concentration in the newly formed liquid

    Note: if Do and P are the same, then you effectively have modal batch
    melting.
    """
    denominator = F * (1 - P) + Do
    return Co * (1 / denominator)
def dataToPixels(z):
    """
    Given a data-point value I{z} along the x (or y) axis, returns the
    number of pixels from the left (or bottom) of the subplot.  The
    formula maps z = -1 to 0 pixels and z = +1 to 500 pixels.
    """
    return 250 * (z + 1.0)
def get_weight_by_name(module_weights, name):
    """
    Retrieve parameter weight values from the result of
    Module.get_weights().

    :param module_weights: iterable of (weight, name) pairs, as returned
        by Module.get_weights()
    :param name: the parameter name to look for
    :return: the matching weight, or None when the name is absent
    """
    matches = (w for w, w_name in module_weights if w_name == name)
    return next(matches, None)
def computeBCC(data_str):
    """
    Compute a block check character (BCC) over an ASCII string.

    XORs the ordinal of every character together and masks the result to
    7 bits.

    :param data_str: ASCII string to checksum.
    :return: integer in [0, 127].  (Doc fix: the original docstring
        claimed a two-character string was returned, but the function has
        always returned an int.)
    """
    # Cleanup: removed the commented-out debug code.
    bcc = 0
    for char in data_str:
        bcc ^= ord(char)
    return bcc & 127
def eval_phoneme_duration(phoneme_list, phoneme_dur_parser):
    """
    obtain duration for each phoneme in the phoneme_list

    :param phoneme_list: a list of phoneme strings
    :param phoneme_dur_parser: a map from phoneme to its duration record
        (under the 'duration' key; index 1 of each record is the duration)
    :return: a list of (phoneme, duration) pairs

    example:
        input: ['k', 'a2', 'er2', 'p', 'u3', 'p', 'ei2', 'uai4']
        output: [('k', 18), ('a2', 32), ('er2', 40), ('p', 19), ('u3', 29),
                 ('p', 19), ('ei2', 25), ('uai4', 38)]
    """
    durations = phoneme_dur_parser['duration']
    pairs = []
    for phoneme in phoneme_list:
        assert phoneme in durations, '%s not found.' % phoneme
        pairs.append((phoneme, int(durations[phoneme][1])))
    return pairs
def minimum(x, y, z):
    """Determine the minimum of the three values x, y and z."""
    return min(x, y, z)
def outdegree(graph):
    """
    Return the out degree of every vertex of *graph*, given as an
    adjacency list: one entry per vertex, in vertex order.
    """
    return [len(adjacent) for adjacent in graph]
def calculateConsensus(u, d, t, U_sum, D_sum):
    """
    Calculate consensus score: a heuristic for the percentage of the
    community who finds a term useful.

    Not every user will vote on a given term, so user reputation is used
    to estimate consensus.  As the number of voters approaches the number
    of users, the votes become more equitable.  (See doc/Scoring.pdf for
    details.)

    :param u: Number of up voters.
    :param d: Number of down voters.
    :param t: Number of total users.
    :param U_sum: Sum of up-voter reputation.
    :param D_sum: Sum of down-voter reputation.
    """
    voters = u + d
    if not voters:
        return 0
    reputation = U_sum + D_sum
    up_share = float(U_sum) / reputation if reputation > 0 else 0.0
    # Actual up votes plus reputation-estimated up votes among non-voters.
    return (u + up_share * (t - voters)) / t
def dumb_lp(text):
    """
    :arg text: a String that contains the text that needs to be processed.
    :returns a list containing keywords (command subjects) extracted from
        the text
    """
    command_subjects = ['weather', 'joke', 'news', 'time']
    words = text.split()
    # Cleanup from review: the original also matched command-flag words
    # ('get', 'show', 'find', ...) but never used the result - only
    # subjects ever reached the returned keyword list.
    return [subject for subject in command_subjects if subject in words]
def symsplit(item):
    """
    Splits symmetric molecule atom mapping expression into its component
    parts.  Returns a list of atom mappings in the symmetric molecule.

    For example: symsplit('(abc;cba)') = ['abc','cba']

    Prints a warning on malformed input (and, on a missing-parenthesis
    error, returns the unsplit whitespace-stripped string), matching the
    original behaviour.
    """
    # Removing white space
    tmp = item.replace(' ', '')
    # The expression must be wrapped in parentheses.
    if (item[0] != '(') or (item[-1] != ')'):
        print("error in", item, "!\n")
    else:
        tmp = tmp[1:-1].split(';')
        # All symmetric mappings must have the same length.
        expected_size = len(tmp[0])
        for mapping in tmp[1:]:
            if len(mapping) != expected_size:
                print("Sizes of the symmetric molecules are not the same!\n")
                print(tmp)
    return tmp
def get_keys_of_max_n(dict_obj, n):
    """Returns the keys that maps to the top n max values in the given
    dict, sorted alphabetically.

    Example:
    --------
    >>> dict_obj = {'a':2, 'b':1, 'c':5}
    >>> get_keys_of_max_n(dict_obj, 2)
    ['a', 'c']
    """
    by_value_desc = sorted(dict_obj.items(), key=lambda kv: kv[1],
                           reverse=True)
    top_keys = [key for key, _value in by_value_desc[:n]]
    return sorted(top_keys)
def is_valid_time_response(response):
    """
    Returns true if a time response is valid (parses as a non-negative
    number).

    str -> bool
    """
    try:
        value = float(response)
    except ValueError:
        return False
    return value >= 0
def is_posix_path3(my_path):
    """Return whether or not a given path is Posix-based (contains '/')."""
    return str(my_path).find("/") != -1
def find_stop_position(initial_path, starting_position): """Returns a suffix from a given path. :param initial_path: The path for which a suffix is wanted. :param starting_position: The starting position from the suffix. :return: A suffix for a given path. """ if isinstance(initial_path[0][0], tuple): path = list(zip(*initial_path))[0] else: path = initial_path for idx, pos in enumerate(path): if pos == starting_position: return initial_path[:idx + 1] raise RuntimeError("Stop position not found in path!")
def shout(word):
    """Return *word* with three exclamation marks appended."""
    return word + '!!!'
def get_month_days(month_number):
    """Return the number of days in the given month (non-leap year).

    month_number = 1 in January
    month_number = 12 in December
    """
    days_in_month = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    return days_in_month[month_number - 1]
def _key_is_valid(dictionary, key): """Test that a dictionary key exists and that its value is not blank.""" if key in dictionary: if dictionary[key]: return True return False
def dSigma_dCosT(eNu, cosT):
    """Return differential cross section in MeV^-2 as a function of the
    emission angle of the outgoing (detected) particle.

    Input:
        eNu:  neutrino energy (MeV)
        cosT: cosine of the angle between neutrino and outgoing (detected)
              particle

    Small values of cosT are preferred, see arXiv:hep-ex/0105068
    (fig. 12, 14).  However, energy dependence is unclear, so a constant
    value is used for now.
    """
    if abs(cosT) > 1:
        # Unphysical angle.
        return 0
    return 0.5
def get_topic_link(text: str) -> str:
    """
    Generate a topic link: the lower-cased text with spaces replaced by
    dashes (a markdown anchor).

    Args:
        text {str} The text value to parse

    Returns:
        {str} The parsed text
    """
    return text.lower().replace(' ', '-')
def decode(string, dtype='U'):
    """
    short wrapper to decode byte-strings read from FRBcat

    When *dtype* denotes a float ('f' in dtype), the value is parsed as a
    float, with any 'null' entry mapped to NaN; otherwise the input is
    returned unchanged.
    """
    if 'f' not in dtype:
        return string
    if 'null' in string:
        return float('NaN')
    return float(string)
def series_sum(n):
    """Return the nth item of the sum series 1 + 1/4 + 1/7 + ...,
    formatted with two decimals."""
    # Denominators are 1, 4, 7, ..., 3n - 2.
    total = sum(1 / d for d in range(1, 3 * n, 3))
    return '{:.2f}'.format(total)
def joinSet(itemSet, removedSet, length):
    """Join a set with itself and return the n-element itemsets.

    :param itemSet: set of frozensets to self-join.
    :param removedSet: itemsets that were pruned; any joined itemset that
        has a pruned subset is filtered out.
    :param length: required size of the joined itemsets.
    :return: set of joined itemsets of the given size containing no pruned
        subset.
    """
    composed_set = set(i.union(j) for i in itemSet for j in itemSet
                       if len(i.union(j)) == length)
    filteredSet = set()
    for item in composed_set:
        # Bug fix: the keep/drop flag was previously initialised once
        # *outside* this loop, so after the first dropped itemset every
        # later itemset was also dropped regardless of its subsets.
        if any(removed.issubset(item) for removed in removedSet):
            continue
        filteredSet.add(item)
    return filteredSet
def _get(obj, name):
    """
    Get the indexable value with given `name` from `obj`, which may be a
    `dict` (or subclass) or a non-dict that has a `__getitem__` method.
    """
    try:
        # try to get value using dict's __getitem__ descriptor first; this
        # bypasses any overridden __getitem__ on dict subclasses, and a
        # non-dict ``obj`` makes the unbound call raise TypeError
        return dict.__getitem__(obj, name)
    except TypeError:
        # if it's a dict, then preserve the TypeError - it came from the
        # lookup itself (e.g. an unhashable ``name``), not from ``obj``
        # being the wrong type
        if isinstance(obj, dict):
            raise
        # otherwise try one last time, relying on __getitem__ if any
        return obj[name]
def round_digits(
    v: float, num_digits: int = 2, use_thousands_separator: bool = False
) -> str:
    """
    Round digit returning a string representing the formatted number.

    :param v: value to convert
    :param num_digits: number of digits to represent v on
        None is (Default value = 2)
    :param use_thousands_separator: use "," to separate thousands (Default
        value = False)
    :returns: str with formatted value
    """
    if num_digits is not None and isinstance(v, float):
        rounded = float("%0.{}f".format(num_digits) % v)
    else:
        rounded = v
    if use_thousands_separator:
        rounded = "{0:,}".format(rounded)  # type: ignore
    return str(rounded)
def ensure_array(exemplar, item):
    """Coerce *item* to an array (linear sequence).

    If *item* is already indexable it is returned unchanged; otherwise a
    tuple of len(*exemplar*) copies of *item* is returned.

    :param exemplar: sequence whose length sizes the replicated result
    :param item: candidate sequence or scalar
    :returns: *item* itself, or a tuple repeating the scalar *item*
    """
    try:
        item[0]
    except TypeError:
        # Not indexable: replicate the scalar to match exemplar's length.
        return (item,) * len(exemplar)
    return item
def reverse_text_len(width, fs):
    """Approximate how many characters fit in *width* at font size *fs*.

    Assumes an average glyph width of 0.6 * font size.
    """
    avg_char_width = 0.6 * fs
    return int(width / avg_char_width)
def str_to_integer(data):
    """Interpret a little-endian sequence of byte values as one integer.

    :param data: iterable of int byte values (e.g. a ``bytes`` object);
        the first element is the least significant byte
    :returns: the combined integer value
    """
    total = 0
    for position, byte in enumerate(data):
        total += byte << (8 * position)
    return total
def reverseComplement(seq):
    """Return the reverse complement of a DNA sequence, retaining the case
    of each letter.

    Recognizes upper/lowercase A/T/G/C, N (ambiguous) and '*' (pass-through).
    Any other character is kept unchanged and a warning is printed, matching
    the original behavior.

    :param seq: DNA sequence string
    :returns: the reverse-complemented string
    """
    # Single dict lookup replaces the former 16-branch if/elif chain.
    pairs = {
        "A": "T", "T": "A", "G": "C", "C": "G", "N": "N",
        "a": "t", "t": "a", "g": "c", "c": "g", "n": "n",
        "*": "*",
    }
    complement_chars = []
    for base in seq:
        try:
            complement_chars.append(pairs[base])
        except KeyError:
            complement_chars.append(base)
            print(
                "Warning: reverse complement function encountered unknown base "
                + "'" + base + "'")
    return "".join(complement_chars)[::-1]
def enthalpy_wall_del(T_0, T_w, C_p):
    """
    Calculates specific enthalpy difference between total conditions and
    those at the stagnation point of a sphere in supersonic flow.

    Input variables:
        T_0 : Gas total temperature
        T_w : Sphere wall temperature
        C_p : Specific heat capacity
    """
    return C_p * (T_0 - T_w)
def vis321(n):
    """ 0 0 0 0 0 00 0 00 00 00 00 00 Number of Os: 4 6 8"""
    # Stub: no pattern generation implemented yet; always the empty string.
    return ''
def unconvert_from_RGB_255(colors):
    """
    Normalize an RGB triple from the 0-255 range down to the 0-1 range.

    Takes a color tuple whose first three elements lie between 0 and 255
    and returns a tuple with each of those elements divided by 255.
    """
    red, green, blue = colors[0], colors[1], colors[2]
    return (red / 255.0, green / 255.0, blue / 255.0)
def storage_descriptor(columns, location):
    """Dynamically build a Data Catalog storage descriptor with the desired
    columns and S3 location (tab-separated OpenCSV serde)."""
    serde_info = {
        "SerializationLibrary": "org.apache.hadoop.hive.serde2.OpenCSVSerde",
        "Parameters": {
            "separatorChar": "\t"
        },
    }
    descriptor = {
        "Columns": columns,
        "Location": location,
        "InputFormat": "org.apache.hadoop.mapred.TextInputFormat",
        "OutputFormat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
        "SerdeInfo": serde_info,
    }
    # Required or SHOW CREATE TABLE fails
    descriptor["BucketColumns"] = []
    # Required or create_dynamic_frame.from_catalog fails
    descriptor["Parameters"] = {}
    return descriptor
def is_task_terminal(task):
    """Return whether a given mesos task is terminal.

    Terminal states are documented in
    http://mesos.apache.org/api/latest/java/org/apache/mesos/Protos.TaskState.html

    :param task: the task to be inspected
    :returns: a boolean indicating if the task is considered to be in a
        terminal state
    """
    terminal_states = ('TASK_ERROR', 'TASK_KILLED', 'TASK_FAILED', 'TASK_FINISHED')
    return task['state'] in terminal_states
def findMinHeightTrees(n, edges):
    """
    Find the roots of all minimum-height trees of an undirected graph by
    repeatedly peeling off leaf layers until at most two nodes remain.

    :type n: int
    :type edges: List[List[int]]
    :rtype: List[int]
    """
    if not edges or n == 1:
        return [0]
    # Build an adjacency-set representation of the undirected graph.
    neighbors = [set() for _ in range(n)]
    for u, v in edges:
        neighbors[u].add(v)
        neighbors[v].add(u)
    fringe = [node for node in range(n) if len(neighbors[node]) == 1]
    remaining = n
    # A tree has at most two centroids, so stop once <= 2 nodes are left.
    while remaining > 2:
        remaining -= len(fringe)
        next_fringe = []
        for leaf in fringe:
            parent = neighbors[leaf].pop()
            neighbors[parent].remove(leaf)
            if len(neighbors[parent]) == 1:
                next_fringe.append(parent)
        fringe = next_fringe
    return fringe
def get_length_of_missing_array(array_of_arrays):
    """
    Sort the inner lists by length and scan adjacent lengths for a gap
    greater than 1; the missing array's length is one less than the length
    after the gap (the last gap found wins, as before).

    BUG FIX: the scan previously ran `while indx <= len(len_list)` and
    relied on catching IndexError to terminate; it now iterates adjacent
    pairs directly with the same results.

    :param array_of_arrays: list of lists (None, or containing None/[],
        yields 0)
    :return: The length of the missing array, or 0 if input is invalid
    """
    if array_of_arrays is None or [] in array_of_arrays:
        return 0
    if None in array_of_arrays:
        return 0
    lengths = sorted(len(lst) for lst in array_of_arrays)
    missing = 0
    # Compare each adjacent pair of sorted lengths; a jump > 1 marks the gap.
    for prev_len, next_len in zip(lengths, lengths[1:]):
        if next_len - prev_len > 1:
            missing = next_len - 1
    return missing
def _relative_channels(channels, adjacency): """Renumber channels from absolute indices to relative indices, to match arrays used in the detection code. Parameters ---------- channels : dict A dict {group: list_of_channels} adjacency : dict A dict {group: set_of_neighbors} """ ch_out = {} adj_out = {} mapping = {} offset = 0 for group in channels: ch = channels[group] n = len(ch) ch_out[group] = [i + offset for i in range(n)] # From absolute to relative indices. mapping.update({c: (i + offset) for i, c in enumerate(ch)}) offset += n # Recreate the adjacency dict by applying the mapping to # both the keys and values. for c, i in mapping.items(): adj_out[i] = set(mapping[_] for _ in adjacency.get(c, set()) if _ in mapping) return ch_out, adj_out
def get_percentage(num_voters, lang_votes):
    """Sum the rounded vote percentages for a list of vote counts.

    Each count is converted to a percentage of num_voters and rounded to
    the nearest integer (via the +0.5 truncation trick) before summing.
    """
    total = 0
    for votes in lang_votes:
        total += int(100 * votes / num_voters + 0.5)
    return total
def merge_dictionaries(a, b, path=None, update=True):
    """
    Merge b into a recursively, modifying and returning a.
    From https://stackoverflow.com/a/25270947

    BUG FIX: the recursive call for nested dicts previously dropped the
    `update` flag, so `update=False` conflicts inside nested dicts were
    silently overwritten instead of raising.

    :param a: destination dict (mutated in place)
    :param b: source dict
    :param path: key path used only for conflict error messages
    :param update: when True, b's value wins on conflicts; when False a
        conflict raises Exception
    :returns: a, with b merged in
    :raises Exception: on a value conflict when update is False
    """
    if path is None:
        path = []
    for key in b:
        if key not in a:
            a[key] = b[key]
            continue
        if isinstance(a[key], dict) and isinstance(b[key], dict):
            # Propagate update so nested conflicts honor the caller's choice.
            merge_dictionaries(a[key], b[key], path + [str(key)], update=update)
        elif a[key] == b[key]:
            pass  # same leaf value
        elif isinstance(a[key], list) and isinstance(b[key], list):
            for idx, val in enumerate(b[key]):
                a[key][idx] = merge_dictionaries(a[key][idx], b[key][idx],
                                                 path + [str(key), str(idx)],
                                                 update=update)
        elif update:
            a[key] = b[key]
        else:
            raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
    return a
def disambiguate_language(text, assumed_languages, db):
    """
    Guess which language an unqualified Wiktionary link points to.

    The candidates are the languages in `assumed_languages` (typically the
    Wiktionary's own language and the other term's language). A candidate
    is accepted if the term has been seen defined in that language in the
    `titles` table. With a single candidate it is returned outright; with
    several confirmed candidates the first wins; with none, None is
    returned.
    """
    if len(assumed_languages) == 1:
        return assumed_languages[0]
    confirmed = []
    for language in assumed_languages:
        cursor = db.cursor()
        cursor.execute('SELECT * from titles where language=? and title=? limit 1',
                       (language, text))
        if cursor.fetchone():
            confirmed.append(language)
    if not confirmed:
        return None
    return confirmed[0]
def depth_first(start, children_func):
    """Return a post-order depth-first traversal of a tree.

    Each node is visited at most once, so shared descendants (a DAG) are
    emitted only the first time they are reached.

    Args:
        start: the root of the tree.
        children_func: function taking a node to its sequence of children.

    Returns:
        a list of nodes in depth-first (post-) order
    """
    visited = set()
    ordering = []

    def visit(node):
        if node in visited:
            return
        visited.add(node)
        for child in children_func(node):
            visit(child)
        # Children first, then the node itself (post-order).
        ordering.append(node)

    visit(start)
    return ordering
def _cmp_tm( lhs, rhs ) : """ compare two tm structs """ if lhs[0] != rhs[0] : return False ; if lhs[1] != rhs[1] : return False ; if lhs[2] != rhs[2] : return False ; if lhs[3] != rhs[3] : return False ; if lhs[4] != rhs[4] : return False ; if lhs[5] != rhs[5] : return False ; if lhs[8] >= 0 and rhs[8] >= 0 : if lhs[8] != rhs[8] : return False ; return True ;
def get_title_from_content(content):
    """
    Generates a title from the content using the first line and stripping
    away leading whitespace and hash signs, capped at 50 characters.

    :content: str
    :returns: str
    """
    first_line, _, _ = content.partition('\n')
    cleaned = first_line.replace('#', '').lstrip()
    return cleaned[:50]
def _ensureListOfLists(iterable): """ Given an iterable, make sure it is at least a 2D array (i.e. list of lists): >>> _ensureListOfLists([[1, 2], [3, 4], [5, 6]]) [[1, 2], [3, 4], [5, 6]] >>> _ensureListOfLists([1, 2]) [[1, 2]] >>> _ensureListOfLists(1) [[1]] :param iterable: The iterable of which to make sure it is 2D :return: A guaranteed 2D version of ``iterable`` """ try: if len(iterable) > 0: try: if len(iterable[0]) > 0: return iterable except TypeError: return [iterable] except TypeError: return [[iterable]]
def get_box(pts):
    """Returns tight fitting bounding box (axis aligned) around set of
    points, as ((min_x, min_y), (max_x, max_y)).
    """
    assert len(pts)
    xs = [p[0] for p in pts]
    ys = [p[1] for p in pts]
    lower_left = (min(xs), min(ys))
    upper_right = (max(xs), max(ys))
    return lower_left, upper_right
def encode_bool(value: bool) -> bytes:
    """Encode a boolean as a single byte: b'\\x01' for True, b'\\x00' for False."""
    return int(value).to_bytes(1, "big")
def create(row_num, col_num, val=None):
    """
    Build a matrix as a list of col_num independent inner lists, each
    holding row_num copies of val.

    :param row_num: the number of rows (length of each inner list)
    :type row_num: int
    :param col_num: the number of columns (number of inner lists)
    :type col_num: int
    :param val: the default value to fill the matrix
    :type val: any (None by default)
    :return: matrix of row_num x col_num
    :rtype: matrix
    """
    # A comprehension guarantees each inner list is a distinct object.
    return [[val] * row_num for _ in range(col_num)]
def compute_position(positions, total, index, length):
    """
    Compute where bar number `index` (of `total` bars, each `length` wide)
    is centered, relative to the group center `positions`.
    """
    # Same term order as a straight-line formula to keep float results
    # bit-identical: start of the group, plus half a bar, plus the offset.
    half_group = total / 2
    half_bar = length / 2
    return positions - half_group + half_bar + index * length