content
stringlengths
42
6.51k
def recordvocab_create_values(
        coll_id="testcoll", vocab_id="testvocab",
        vocab_uri="test:testvocab#", update="RecordVocab"):
    """Return the entity values used when creating a namespace vocabulary entity."""
    values = {
        'rdfs:label':   "%s %s/_vocab/%s" % (update, coll_id, vocab_id),
        'rdfs:comment': "%s help for %s in collection %s" % (update, vocab_id, coll_id),
        'rdfs:seeAlso': [],
        }
    # The URI is optional: omit the key entirely when no URI is supplied.
    if vocab_uri:
        values['annal:uri'] = vocab_uri
    return values
def _url_joiner(*args): """Helper: construct an url by joining sections with /""" return '/'.join(s.strip('/') for s in args)
def get_map_name(episode_index, map_index):
    """Return a map name from an episode and map index.

    @param episode_index: episode index; 0 selects Doom 2 style MAPxx names.
    @param map_index: the index of the map.
    @return: 'ExMy' when episode_index is non-zero, zero-padded 'MAPxx' otherwise.
    """
    if episode_index == 0:
        return 'MAP{:0>2}'.format(map_index)
    return 'E{}M{}'.format(episode_index, map_index)
def _scale_dimensions(bound_box, factor): """ Scale dimensions of bounded box by a certain factor """ if (factor <= 0): raise Exception("scaling factor must be positive") exit(1) [[x_min, y_min], [x_max, y_max]] = bound_box x_avg_diff = (x_max-x_min) / 2.0 y_avg_diff = (y_max-y_min) / 2.0 x_scale = (factor - 1) * x_avg_diff y_scale = (factor - 1) * y_avg_diff return [[(x_min - x_scale), (y_min - y_scale)], [(x_max + x_scale), (y_max + y_scale)]]
def marketCap(price, shares, sharesUnit=1000):
    """Market capitalization as price * outstanding shares.

    SHROUT is reported in thousands, hence the default sharesUnit of 1000.
    Returns FLOAT.
    """
    outstanding = shares * sharesUnit
    return price * outstanding
def get_vg_name_base(host_name, os_name):
    """Return a base name for a volume group based on the host and os names.

    Produces '<os_name>_<host_name>' when host_name is non-empty, otherwise
    just os_name.
    """
    # Fix: use truthiness instead of `!= None and len(...) != 0` (PEP 8:
    # never compare to None with !=); behavior is identical for str/None.
    if host_name:
        return os_name + '_' + host_name
    return os_name
def write_program_to_file(program, filename, memory_location, _LABELS):
    """
    Take the assembled program and write to a given filename.

    Writes two files: <filename>.obj (JSON metadata document) and
    <filename>.bin (raw program bytes).

    Parameters
    ----------
    program : list, mandatory
        The compiled program (list of word values, 0..255 per byte written)
    filename: str, mandatory
        The filename to write to
    memory_location: str, mandatory
        Location in memory of the first word of the program
    _LABELS: list, mandatory
        Label table

    Returns
    -------
    True
    """
    from datetime import datetime
    import json

    # Fix: build the document with the json module instead of manual string
    # concatenation.  The hand-rolled version emitted invalid JSON whenever a
    # name/label contained a quote, and serialized label tuples as "(...)".
    document = {
        "program": filename,
        "compile_date": datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
        "location": memory_location,
        # Memory words are stored as bare lowercase hex strings (no "0x").
        "memory": [format(word, 'x') for word in program],
        "labels": _LABELS,
    }
    with open(filename + '.obj', "w") as output:
        json.dump(document, output)
    with open(filename + '.bin', "w+b") as binary:
        binary.write(bytearray(program))
    return True
def process_subtitle(data):
    """Map each subtitle group's tag_id to its name."""
    return {entry["tag_id"]: entry["name"] for entry in data}
def map_color(text, key='fontcolor'):
    """Map *text* to a color dictionary.

    :param text: text to inspect; 'error' or 'fail' anywhere in it maps to red.
    :param key: key to use in the returned dictionary for the color.
    :returns: {key: 'red'} when the text signals a problem, else {}.
    """
    signals_problem = 'error' in text or 'fail' in text
    return {key: 'red'} if signals_problem else {}
def get_node_by_id(node_id, dialogue_node_list):
    """Return the dialogue node whose "Id" equals *node_id*, or None."""
    return next(
        (node for node in dialogue_node_list if node["Id"] == node_id),
        None,
    )
def escape(string):
    """Return the given HTML with ampersands, quotes and carets encoded.

    '&' is replaced first so already-produced entities are not double-escaped.
    """
    replacements = (
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ('"', '&quot;'),
        ("'", '&#39;'),
    )
    for old, new in replacements:
        string = string.replace(old, new)
    return string
def between_markers(text: str, begin: str, end: str) -> str:
    """Return the substring of *text* between the *begin* and *end* markers.

    A missing begin marker means "from the start"; a missing end marker
    means "to the end".
    """
    if begin in text:
        start = text.index(begin) + len(begin)
    else:
        start = 0
    if end in text:
        stop = text.index(end)
    else:
        stop = len(text)
    return text[start:stop]
def set_delete(form_value):
    """Used with an HTML checkbox input.

    Return ``True`` iff **form_value** is ``'on'``.
    """
    return form_value == 'on'
def center(numpoints, refcoords):
    """Center a molecule using equally weighted points.

    Parameters
        numpoints: Number of points
        refcoords: List of reference coordinates, each a [x, y, z] list

    Returns
        refcenter: Center of the set of points (list)
        relcoords: refcoords moved relative to refcenter (list)
    """
    refcenter = [
        sum(refcoords[i][axis] for i in range(numpoints)) / numpoints
        for axis in range(3)
    ]
    relcoords = [
        [refcoords[i][axis] - refcenter[axis] for axis in range(3)]
        for i in range(numpoints)
    ]
    return refcenter, relcoords
def shift_coordinates_bottom_left(coords, size, binning=1):
    """Shift particle coordinates so the origin is the tomogram's bottom-left.

    Args:
        coords: the (x, y, z) coordinates for the particle
        size: the reconstruction MRC half-dimensions (nx/2, ny/2, nz/2)
        binning: bin factor from the original stack to the reconstruction,
            for coordinates in the original unbinned coordinate system

    Returns:
        the new coordinates as an (x, y, z) tuple
    """
    shifted = []
    for axis in range(3):
        shifted.append(float(coords[axis]) / binning + size[axis])
    return tuple(shifted)
def computeBoxIndex(row: int, column: int) -> int:
    """Return the index (0-8) of the 3x3 sudoku box containing (row, column).

    Boxes are numbered left-to-right, top-to-bottom.
    """
    return (row // 3) * 3 + column // 3
def get_sns(data, sns):
    """Look up key *sns* in dict *data*, returning None when absent."""
    if sns in data:
        return data[sns]
    return None
def process_deployment_row(data):
    """Flatten a raw deployment record into a dict with these attributes:

    eventId, deploymentNumber, editPhase, eventStartTime, eventStopTime
    (copied when present), rd (from referenceDesignator), latitude /
    longitude / depth (from the location block, when present), and
    mooring_uid / node_uid / sensor_uid (always present, None when missing).
    """
    # Fix: the original wrapped everything in try/except only to re-raise a
    # generic Exception(str(err)), discarding the exception type and chained
    # traceback; errors now propagate unchanged.
    deployment = {}
    # Simple top-level properties, copied only when present.
    for field in ('eventId', 'deploymentNumber', 'editPhase',
                  'eventStartTime', 'eventStopTime'):
        if field in data:
            deployment[field] = data[field]
    if 'referenceDesignator' in data:
        deployment['rd'] = data['referenceDesignator']
    # Location information: latitude, longitude, depth (each optional).
    location = data.get('location') or {}
    for field in ('latitude', 'longitude', 'depth'):
        if field in location:
            deployment[field] = location[field]
    # Asset uids: always set, defaulting to None when the asset or its uid
    # is missing/null.
    for asset in ('mooring', 'node', 'sensor'):
        asset_data = data.get(asset) or {}
        deployment[asset + '_uid'] = asset_data.get('uid')
    return deployment
def config_dict_to_flags(config):
    """Convert a config dict to a flat list of command line flags.

    e.g. {a:1, b:2, c:True} -> ['--a', '1', '--b', '2', '--c'].
    False booleans are omitted entirely; True booleans emit a bare flag.
    """
    flags = []
    for name, value in config.items():
        if name in {'job_name', 'output_dir', 'config'}:
            continue  # reserved keys, never forwarded
        if value is False:
            continue  # false booleans are dropped entirely
        flags.append('--' + name)
        if value is not True:
            assert isinstance(value, (str, int, float))
            flags.append(str(value))
    return flags
def parse_int_tuple(tuple_str):
    """Parse a comma-separated pair such as '3,4' into an integer 2-tuple."""
    result = tuple(map(int, tuple_str.split(",")))
    assert len(result) == 2
    return result
def get_config_log(arguments):
    """Extract the logging-related configuration from a raw argument list.

    :param arguments: argv-style list (first element is the program name)
    :returns: dict with 'log_file', 'log_mode' and 'debug' entries
    """
    log_config = {
        "log_file": "cibyl_output.log",
        "log_mode": "both",
        "debug": False,
    }
    # Scan past the program name; a value flag's argument sits at the next
    # position in the full list (offset + 2 relative to the sliced index).
    for position, argument in enumerate(arguments[1:]):
        if argument == "--log-file":
            log_config["log_file"] = arguments[position + 2]
        elif argument == "--log-mode":
            log_config["log_mode"] = arguments[position + 2]
        elif argument == "--debug":
            log_config["debug"] = True
    return log_config
def matrix_to_tuple(array, empty_array):
    """Convert an 8x8 2D list into a tuple of tuples (hashable, so the matrix
    can be used as a dictionary key).

    *empty_array* is a caller-supplied 8-slot scratch list that is overwritten
    in place (reused across calls for efficiency).
    """
    for index in range(8):
        empty_array[index] = tuple(array[index])
    return tuple(empty_array)
def excel_column_name(num):
    """Take the 0-based index of a column and return its Excel-style name.

    >>> excel_column_name(0)
    'A'
    >>> excel_column_name(26)
    'AA'
    >>> excel_column_name(57)
    'BF'
    """
    letters = []
    # Bijective base-26: shift by one each round so 'Z' works without a zero digit.
    n = num + 1
    while n > 0:
        n, remainder = divmod(n - 1, 26)
        letters.append(chr(ord('A') + remainder))
    return ''.join(reversed(letters))
def get_text_filename(book_id, chapter_num=-1):
    """Build the text-document filename for a book identifier and optional
    chapter number (-1 means "whole book", no chapter suffix)."""
    if chapter_num == -1:
        return book_id + ".txt"
    return "{}-{}.txt".format(book_id, chapter_num)
def get_html(filename_rel, height):
    """Return HTML embedding *filename_rel* into another page via an iframe,
    wrapped in a resizable div of the given pixel *height*."""
    anchor_style = 'font-size:small; float:right;'
    div_style = ('width: 500px; height: %ipx; align: center; resize:both; overflow: hidden; '
                 'box-shadow: 5px 5px 5px #777; padding: 4px;') % height
    iframe_style = 'width: 100%; height: 100%; border: 2px solid #094;'
    parts = [
        "<a target='new' href='%s' style='%s'>open in new tab</a>" % (filename_rel, anchor_style),
        "<div style='%s'>" % div_style,
        "<iframe src='%s' style='%s'>iframe not supported</iframe>" % (filename_rel, iframe_style),
        "</div>",
    ]
    return ''.join(parts)
def get_line_row(lines, row):
    """Return lines[row], or '' when *row* is past the end of *lines*.

    - lines: (Array string), array of lines
    - row: int, >= 0, the row index to grab
    """
    if row >= len(lines):
        return ''
    return lines[row]
def build_map_index_structure(mapit):
    """Build parallel (index, maps) structures from mapping blocks.

    :param mapit: iterable of blocks with name/tstrand/qstrand/tstart/tend/
        qstart/qend attributes
    :return: (index, maps) — block names, and (tstart, tend, tstrand, qstart,
        qend, qstrand) tuples with strands encoded as 1 for '+' else 0
    """
    index = []
    maps = []
    for block in mapit:
        index.append(block.name)
        target_strand = int(block.tstrand == '+')
        # Only forward target strands are valid in these map files.
        assert target_strand == 1, 'Negative target strand detected - invalid line in map: {}'.format(block)
        query_strand = int(block.qstrand == '+')
        maps.append((block.tstart, block.tend, target_strand,
                     block.qstart, block.qend, query_strand))
    assert len(index) == len(maps), 'Size mismatch between index and maps: {} vs {}'.format(len(index), len(maps))
    assert maps or len(index) == 0, 'No mapping blocks extracted'
    return index, maps
def line(k, x):
    """Evaluate the line y = slope * x + intercept, with k = (slope, intercept)."""
    slope, intercept = k[0], k[1]
    return slope * x + intercept
def compare_version(current_version, fetched_version):
    """
    Compare two versions and check whether the fetched version is greater
    than the current version.

    :param current_version: version code from the host system's package manager
    :param fetched_version: version code obtained from a remote source
    :return: True if fetched_version > current_version, otherwise False

    Versions are dotted numeric strings ("1.2.3"); missing parts count as 0.
    """
    current_parts = str(current_version).split(".")
    fetched_parts = str(fetched_version).split(".")
    length = max(len(current_parts), len(fetched_parts))
    for k in range(length):
        current = int(current_parts[k]) if k < len(current_parts) else 0
        fetched = int(fetched_parts[k]) if k < len(fetched_parts) else 0
        # Bug fix: the original returned False as soon as the first pair of
        # parts was not strictly increasing, so "1.0" -> "1.1" compared as
        # not-newer.  Only decide when the parts actually differ; equal parts
        # defer to the next component.
        if fetched > current:
            return True
        if fetched < current:
            return False
    return False
def parse_requirements(filename):
    """Load requirements from a pip requirements file.

    Returns the stripped, non-empty, non-comment lines.
    """
    # Fix: the original opened the file without ever closing it (leaked
    # handle); a context manager guarantees closure.
    with open(filename) as handle:
        lines = (line.strip() for line in handle)
        return [line for line in lines if line and not line.startswith("#")]
def readname2moviename(readname):
    """Extract the movie name (text before the first '/') from a read name.

    ...doctest:
        >>> readname2moviename('movie/0/1_2 blaa')
        'movie'
    """
    head = readname.split(' ', 1)[0]
    return head.split('/', 1)[0]
def elements_for_apsides(apocenter_radius, pericenter_radius):
    """Return (semi-major axis, eccentricity) for the given apside radii."""
    radius_sum = apocenter_radius + pericenter_radius
    semi_major_axis = radius_sum / 2
    eccentricity = (apocenter_radius - pericenter_radius) / radius_sum
    return semi_major_axis, eccentricity
def select_lm_state(lm_states, idx, lm_layers, is_wordlm):
    """Get the LM state for one batch element.

    Args:
        lm_states (list or dict): batch of LM states
        idx (int): index to extract state from batch state
        lm_layers (int): number of LM layers
        is_wordlm (bool): whether provided LM is a word-LM

    Returns:
        idx_state (dict): LM state for given id
    """
    if is_wordlm:
        return lm_states[idx]
    return {
        "c": [lm_states["c"][layer][idx] for layer in range(lm_layers)],
        "h": [lm_states["h"][layer][idx] for layer in range(lm_layers)],
    }
def _sort_values(val1, val2): """Return a tuple with the input values sorted.""" if val1 > val2: val1, val2 = val2, val1 return val1, val2
def longest_substring_using_lists(s: str) -> int:
    """Find the length of the longest substring without repeating characters.

    >>> longest_substring_using_lists("abac")
    3
    >>> longest_substring_using_lists("abcabcbb")
    3
    >>> longest_substring_using_lists("bbbbb")
    1
    >>> longest_substring_using_lists("pwwkew")
    3
    """
    words = []      # candidate substrings currently being extended
    longest = 0
    for char in s:
        finished = []
        for index, word in enumerate(words):
            if char in word:
                # A repeat ends this candidate: record it and mark for removal.
                longest = max(longest, len(word))
                finished.append(word)
            else:
                words[index] += char
        for word in finished:
            words.remove(word)
        # A new candidate also starts at every character.
        words.append(char)
    # Bug fix: the original called max(longest, *[]) for an empty input,
    # which raised TypeError; fold `longest` into the candidate list instead.
    return max([longest] + [len(word) for word in words])
def records_preprocessing(traces):
    """Preprocess trace information to build a DAG for a request flow.

    Parameters:
        traces (dict): trace data with a "records" list; each record carries
            "uuid", "timestamp" and "service" keys.

    Returns:
        node_mapping (dict): each requesting service mapped to the unique
            list of services it calls.
    """
    # 1. Aggregate requests/responses by uuid (setdefault replaces the
    #    original try/except-KeyError construction — same behavior, less code).
    trace_flows = {}
    for record in traces["records"]:
        trace_flows.setdefault(record["uuid"], []).append(record)
    # 2-4. Sort each flow by timestamp, keep only the first request/response
    #      pair, and drop incomplete flows (fewer than two records).
    complete_flows = {}
    for uuid, records in trace_flows.items():
        records.sort(key=lambda r: r["timestamp"])
        pair = records[:2]
        if len(pair) == 2:
            complete_flows[uuid] = pair
    # 5. Map each requesting service to the services it calls.
    node_mapping = {}
    for request, response in complete_flows.values():
        node_mapping.setdefault(request["service"], []).append(response["service"])
    # 6. De-duplicate neighbour lists.
    for service in node_mapping:
        node_mapping[service] = list(set(node_mapping[service]))
    return node_mapping
def _getbce(obj, i): """Get broadcasted element""" if not isinstance(obj, list) or len(obj) == 0: return obj elif len(obj) == 1: return obj[0] elif len(obj) > i: return obj[i] else: raise IndexError("failed to broadcast")
def as_float(s):
    """Convert *s* to a float; any falsy input (None, '', 0) yields 0.0."""
    return float(s) if s else 0.0
def length_sort(list_a: list):
    """Problem 28(a): sort a list's sublists by their length.

    Parameters
    ----------
    list_a : list
        The input list of sublists.

    Returns
    -------
    list of list
        A new list ordered by sublist length (stable for equal lengths).

    Raises
    ------
    TypeError
        If the given argument is not of `list` type.
    """
    if not isinstance(list_a, list):
        raise TypeError('The argument given is not of `list` type.')
    return sorted(list_a, key=len)
def snake(string):
    """Convert whitespace-separated words to snake_case."""
    words = string.split()
    return "_".join(words)
def append_to_doc(doc, content):
    """
    Append content to the doc string.

    Args:
        doc (str): The original doc string.
        content (str): The new doc string, which should be a standalone section.

    Returns:
        str: The modified doc string.
    """
    # Normalize the new section: strip trailing whitespace on each line and
    # drop leading/trailing blank lines.
    content = '\n'.join(l.rstrip() for l in content.split('\n'))
    content = content.lstrip('\n')
    content = content.rstrip('\n')
    # fast path: doc is empty, just wrap the content
    if not doc:
        # the empty line before the content might be required for the
        # sphinx to correctly parse the section.
        if not content.startswith('\n'):
            content = '\n' + content
        if not content.endswith('\n'):
            content = content + '\n'
        return content
    # slow path: doc is not empty, parse it
    # find the indent of the doc string.
    # NOTE(review): only the first indented non-blank line is inspected;
    # presumably all body lines of `doc` share that indent — confirm.
    indent = 0
    for line in doc.split('\n'):
        if line and line.strip() and line.startswith(' '):
            # Count the run of leading spaces on that line.
            for c in line:
                if c != ' ':
                    break
                indent += 1
            break
    indent = ' ' * indent
    # compose the docstring: original doc, a separating blank line, then the
    # new section re-indented to match.
    contents = [doc, '\n']
    if not doc.endswith('\n'):
        contents.append('\n')
    for line in content.split('\n'):
        if not line.strip():
            contents.append('\n')
        else:
            contents.append(indent + line + '\n')
    return ''.join(contents)
def build_senators(party, number_list):
    """Return a dictionary mapping party letters to their senator counts.

    e.g. case #1: {'A': 2, 'B': 2}; case #2: {'A': 3, 'B': 2, 'C': 2}.
    Parties are lettered 'A', 'B', ... in order.
    """
    return {
        chr(index + ord('A')): int(number_list[index])
        for index in range(party)
    }
def transpose(table):
    """Return a copy of *table* with rows and columns swapped.

    Precondition: table is a (non-ragged) 2d List.
    """
    return [list(column) for column in zip(*table)]
def remove_character_under32(s):
    """
    Replaces :epkg:`ASCII` control characters in *[0..31]* with spaces.

    @param      s       string to process
    @return             filtered string
    """
    # Fix: single-pass join instead of quadratic `+=` string building.
    # (ord(c) is always >= 0, so the original `0 <= d` half of the test
    # was redundant.)
    return "".join(" " if ord(c) < 32 else c for c in s)
def _check_channels_spatial_filter(ch_names, filters): """Return data channel indices to be used with spatial filter. Unlike ``pick_channels``, this respects the order of ch_names. """ sel = [] # first check for channel discrepancies between filter and data: for ch_name in filters['ch_names']: if ch_name not in ch_names: raise ValueError('The spatial filter was computed with channel %s ' 'which is not present in the data. You should ' 'compute a new spatial filter restricted to the ' 'good data channels.' % ch_name) # then compare list of channels and get selection based on data: sel = [ii for ii, ch_name in enumerate(ch_names) if ch_name in filters['ch_names']] return sel
def onek_encoding_unk(x, allowable_set):
    """One-hot encode *x* over *allowable_set*.

    Unknown values are mapped onto the last entry of the set.
    """
    if x not in allowable_set:
        x = allowable_set[-1]
    return [x == candidate for candidate in allowable_set]
def sort_keys(keys):
    """Sort keys, but arrange [category, tunnel:name, ref, <rest sorted>].

    The three special keys must be present; ValueError propagates otherwise.
    """
    remaining = sorted(keys)
    for special in ('tunnel:name', 'ref', 'category'):
        remaining.remove(special)
    return ['category', 'tunnel:name', 'ref'] + remaining
def combine_two_lists(list_1, list_2):
    """Pair corresponding elements of the two lists into tuples via ``zip``.

    :param list_1:
    :param list_2:
    :return: list of (a, b) tuples, truncated to the shorter input
    """
    return list(zip(list_1, list_2))
def _get_item_from_doc(doc, key):
    """
    Get an item from the document given a key which might use dot notation.
    e.g.
        doc = {'deep': {'nested': {'list': ['a', 'b', 'c']}}}
        key = 'deep.nested.list.1' -> 'b'
    :param doc dict:
    :param key str:
    :rtype: value
    """
    if '.' in key:
        item = doc
        for level in key.split('.'):
            if isinstance(item, list):
                # list levels are indexed numerically; non-numeric or
                # out-of-range indices resolve the whole path to None
                try:
                    level_int = int(level)
                except ValueError:
                    return None
                try:
                    item = item[level_int]
                except IndexError:
                    return None
            elif isinstance(item, dict):
                # a missing dict key yields {} so deeper levels keep walking
                # and ultimately resolve to None via the final `or None`
                item = item.get(level, {})
            else:
                # hit a scalar before the key path was exhausted
                return None
        # NOTE(review): falsy leaf values (0, '', [], {}) are coerced to
        # None here — presumably intentional, confirm with callers.
        return item or None
    return doc.get(key)
def process_stream(g):
    """Split a record stream into node and edge dictionaries.

    4-field records are edges keyed by (rec[0], rec[1]) accumulating their
    last field; every other truthy record is a node mapping rec[0] -> rec[-1].
    Falsy records are skipped.
    """
    nodes = {}
    edges = {}
    for rec in g:
        if not rec:
            continue
        if len(rec) == 4:
            edges.setdefault((rec[0], rec[1]), []).append(rec[-1])
        else:
            nodes[rec[0]] = rec[-1]
    return nodes, edges
def getParamNamesFromFunction(func):
    """Return the parameter names of *func* in declaration order.

    Reads the object's `params` attribute (a list of identifier nodes);
    returns None when there are no parameters.
    """
    params = getattr(func, "params", None)
    if not params:
        return None
    return [identifier.value for identifier in params]
def validate_sequence(sequence, sequence_type):
    """Validate a string passed to the Sequence constructor.

    Alphabets follow the NCBI BLAST FASTA documentation for valid base and
    amino-acid codes.

    Parameters
    -------------
    sequence : str
        The DNA/RNA/Protein sequence
    sequence_type : str
        The sequence type: 'DNA', 'RNA' or 'Protein'

    Returns True when every symbol is legal; False otherwise (an unknown
    sequence_type has an empty alphabet, so any non-empty sequence fails).
    """
    # Sets give O(1) membership tests (the original scanned a list per base).
    # DNA and RNA share the IUPAC ambiguity codes; they differ only in T vs U.
    nucleotide_common = set('AGCWSMKRY-BVDHN')
    if sequence_type == 'DNA':
        legal_values = nucleotide_common | {'T'}
    elif sequence_type == 'RNA':
        legal_values = nucleotide_common | {'U'}
    elif sequence_type == 'Protein':
        # A-Z except J, plus gap '-' and stop '*', per FASTA conventions.
        legal_values = {chr(i) for i in range(65, 91)} - {'J'} | {'-', '*'}
    else:
        legal_values = set()
    return all(base in legal_values for base in sequence)
def check_type(item, expected_type):
    """Check that *item*'s type matches *expected_type*.

    :param item: any value
    :param expected_type: type-name string (e.g. "str"); matched as a
        case-insensitive substring, so "string"/"integer" also work
    :return: the string form of type(item)
    :raises TypeError: when the item's type does not match
    """
    item_type = str(type(item))
    # (keyword expected inside expected_type, exact str(type(...)) required);
    # a data table replaces the original nine-branch elif ladder.
    checks = (
        ("str", "<class 'str'>"),
        ("int", "<class 'int'>"),
        ("bool", "<class 'bool'>"),
        ("float", "<class 'float'>"),
        ("tuple", "<class 'tuple'>"),
        ("list", "<class 'list'>"),
        ("dict", "<class 'dict'>"),
        ("datetime", "<class 'datetime.datetime'>"),
        ("none", "<class 'NoneType'>"),
    )
    expected_lower = expected_type.lower()
    for keyword, type_string in checks:
        if keyword in expected_lower and item_type == type_string:
            return item_type
    # Bug fix: the original formatted the builtin `object` into the message
    # (".format(a=object, ...)") instead of the offending item.
    raise TypeError("{a} isn't a {b}".format(a=item, b=expected_type))
def is_channel(string):
    """Return a truthy value when *string* names an IRC channel
    (starts with '#', '&', '+' or '!').

    A falsy input is returned as-is (still falsy), matching the original
    short-circuit behavior.
    """
    if not string:
        return string
    return string[0] in "#&+!"
def findlower(x, vec):
    """Return the index of the first element of *vec* strictly less than *x*,
    or -1 when no such element exists."""
    for index, value in enumerate(vec):
        if value < x:
            return index
    return -1
def binary_encode(config, tot_configs):
    """Encode *config* as a fixed-width list of binary digits.

    Args:
        config (int): the index of the configuration
        tot_configs (int): total number of configurations; its binary width
            determines the zero-padding of the result
    """
    width = len(bin(tot_configs)) - 2  # digits after the '0b' prefix
    bits = [int(digit) for digit in format(config, 'b')]
    padding = [0] * (width - len(bits))
    return padding + bits
def highest_divisor_power_of_2(n: int) -> int:
    """Return the maximum power of two that divides n. Return 0 for n == 0.

    Uses the two's-complement identity ``n & -n`` (== ``n & ~(n - 1)``),
    which isolates the lowest set bit.
    """
    return n & -n
def get_module_task_instance_id(task_instances):
    """Return the id of the first task instance mapped to 'module_node',
    or None when there is none."""
    for instance_id, kind in task_instances.items():
        if kind == 'module_node':
            return instance_id
    return None
def get_test_pipeline():
    """Return an arbitrary pipeline definition (fixture data for tests)."""
    step_with_params = {'step3key1': 'values3k1', 'step3key2': 'values3k2'}
    return {
        'sg1': ['step1', 'step2', step_with_params, 'step4'],
        'sg2': False,
        'sg3': 77,
        'sg4': None,
    }
def flatten(cmd, path="", fc=None, sep="."):
    """Convert a nested dict/list to a flat dict with hierarchical keys.

    Keys at each level are joined with *sep*; list elements contribute their
    index as a path component.  A "ns:" style prefix on dict keys is stripped
    (only the second ':'-separated segment is kept).

    :param cmd: nested structure (dict / list / scalar leaf)
    :param path: key prefix accumulated so far
    :param fc: accumulator dict (copied, never mutated); None starts fresh
    :param sep: separator between hierarchical key components
    """
    if fc is None:
        fc = {}
    fcmd = fc.copy()
    if isinstance(cmd, dict):
        for key, value in cmd.items():
            key = key.split(":")[1] if ":" in key else key
            # Bug fix: `sep` is now propagated into the recursion — the
            # original dropped it, so nested levels always used ".".
            fcmd = flatten(value, sep.join((path, key)) if path else key,
                           fcmd, sep)
    elif isinstance(cmd, list):
        for n, value in enumerate(cmd):
            fcmd.update(flatten(value, sep.join((path, str(n))), None, sep))
    else:
        fcmd[path] = cmd
    return fcmd
def _to_appropriate_type(s): """convert string `s` to an int, bool, or float, as appropriate. Returns the original string if it does not appear to be any of these types.""" if s == 'True' or s == 'T': return True elif s == 'False' or s == 'F': return False try: return int(s) except: pass try: return float(s) except: pass return s
def charge_gen(item, atom_number, charge_list):
    """Return the charge on a single bead as a float.

    Beads whose atom_number appears in charge_list carry item['charge_max'];
    all others are neutral (0.0).
    """
    if atom_number not in charge_list:
        return 0.0
    return float(item['charge_max'])
def strip_paths(paths):
    """Remove immediate back-tracking (a -> b -> a collapses to a) from each path."""
    stripped = []
    for path in paths:
        walk = []
        for node in path:
            if len(walk) >= 2 and node == walk[-2]:
                # Stepping straight back: drop the detour node instead.
                walk.pop()
            else:
                walk.append(node)
        stripped.append(walk)
    return stripped
def bottom_up_knapsack(profits, profits_length, weights, capacity):
    """0/1 knapsack via bottom-up dynamic programming.

    Parameters
    ----------
    profits : list
        integer list containing profits
    profits_length : int
        number of profits in profit list
    weights : list
        integer list of weights
    capacity : int
        capacity of the bag

    Returns
    -------
    int
        maximum profit

    >>> bottom_up_knapsack([60, 100, 120], 3, [10, 20, 30], 50)
    220
    """
    # cache[p][c] = best profit using the first p items with capacity c.
    cache = [[0] * (capacity + 1) for _ in range(profits_length + 1)]
    for p in range(1, profits_length + 1):
        for c in range(1, capacity + 1):
            if weights[p - 1] <= c:
                # Either take item p-1 (profit + best of remaining capacity)
                # or skip it.
                cache[p][c] = max(
                    profits[p - 1] + cache[p - 1][c - weights[p - 1]],
                    cache[p - 1][c])
            else:
                cache[p][c] = cache[p - 1][c]
    # Dead code removed: the original also reconstructed the list of selected
    # items here, but never used or returned it.
    return cache[profits_length][capacity]
def kmlCoords(coords):
    """Format a sequence of numbers as a comma-delimited KML coordinate string."""
    return ','.join(map(str, coords))
def multi_string(val):
    """Join list/tuple values with a single backslash delimiter; scalars pass
    through unchanged."""
    if not isinstance(val, (list, tuple)):
        return val
    # "\\" is the escaped form of one literal backslash.
    return "\\".join(val)
def get_markdown_title_id(section_title):
    """Return the HTML-equivalent id for a section title.

    Arguments:
        section_title -- Section title
    """
    underscored = "_".join(section_title.split(" "))
    return underscored.lower()
def validate_encryptionoption(encryption_option):
    """
    Validate EncryptionOption for EncryptionConfiguration
    Property: EncryptionConfiguration.EncryptionOption
    """
    valid_options = ("CSE_KMS", "SSE_KMS", "SSE_S3")
    if encryption_option not in valid_options:
        raise ValueError(
            "EncryptionConfiguration EncryptionOption must be one of: %s"
            % ", ".join(valid_options)
        )
    return encryption_option
def seconds_to_float(time_in_seconds):
    """Convert seconds to minutes as a float rounded to one decimal place.

    Capped at 9.9 (i.e. inputs above 594 seconds all yield 9.9).
    """
    if time_in_seconds > 594:
        return 9.9
    return round(float(time_in_seconds) / 60.0, 1)
def add_metadata_columns_to_schema(schema_message):
    """Add the stitch `_sdc_*` metadata columns to a schema message.

    Per https://www.stitchdata.com/docs/data-structure/integration-schemas#sdc-columns
    these columns carry information about data injections.  The message is
    modified in place and also returned.
    """
    properties = schema_message['schema']['properties']
    timestamp_column = {'type': ['null', 'string'], 'format': 'date-time'}
    properties['_sdc_extracted_at'] = dict(timestamp_column)
    properties['_sdc_batched_at'] = dict(timestamp_column)
    properties['_sdc_deleted_at'] = {'type': ['null', 'string']}
    return schema_message
def isodd(number):
    """Convenience test for oddness.

    Returns a boolean, or a boolean numpy array for array input (the modulo
    expression broadcasts elementwise).
    """
    return number % 2 == 1
def eq3(A, B, C, T):
    """Chemsep equation 3: A + B*T + C*T**2.

    :param A: Equation parameter A
    :param B: Equation parameter B
    :param C: Equation parameter C
    :param T: Temperature in K
    """
    quadratic_term = C * (T ** 2)
    return A + B * T + quadratic_term
def get_item(dictionary, key):
    """Custom template filter: fetch *key* from *dictionary* (None when absent)."""
    value = dictionary.get(key)
    return value
def getNameInBrackets(data_path):
    """Extract the node name inside ["..."] from a Blender data path.

    Returns None when the input is None or no complete ["name"] span exists.
    """
    if data_path is None:
        return None
    opener = data_path.find('["')
    if opener == -1:
        return None
    remainder = data_path[opener + 2:]
    closer = remainder.find('"')
    if closer == -1:
        return None
    return remainder[:closer]
def myhasattr(obj, name, _marker=object()):
    """hasattr() replacement that masks only AttributeError.

    Built-in hasattr() swallows every exception, which too often hides
    programming errors; three-argument getattr() only masks AttributeError,
    so comparing against a private sentinel gives a safer hasattr().
    """
    found = getattr(obj, name, _marker)
    return found is not _marker
def parse_attributes(fields):
    """Parse a list of 'key=value' strings into a dict.

    Splits on the first '=' only, so values may themselves contain '='.
    (The original split on every '=' and silently dropped everything after
    the second one, e.g. 'b=x=y' lost '=y'.)
    """
    attributes = {}
    for field in fields:
        key, value = field.split('=', 1)
        attributes[key] = value
    return attributes
def int_to_point(i, grid_size):
    """Convert a state int into the corresponding (x, y) grid coordinate.

    i: State int.
    -> (x, y) int tuple.
    """
    y, x = divmod(i, grid_size)
    return (x, y)
def isolateNameToLabel(names):
    """Process isolate names into labels appropriate for visualisation.

    Kept as a function so character filtering can be added in one place later.

    Args:
        names (list): List of isolate names.

    Returns:
        labels (list): basename of each name with its extension removed.
    """
    labels = []
    for name in names:
        basename = name.split('/')[-1]
        labels.append(basename.split('.')[0])
    return labels
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:
        proc_data: (list of dictionaries) raw structured data to process

    Returns:
        The same list, with 'metric', 'ref', 'use', 'mss', 'window' and
        'irtt' values converted to int in place (None when not numeric);
        all other fields (e.g. 'destination', 'iface') left untouched.
    """
    # Hoisted out of the loop: the integer-field list is loop-invariant
    # (the original rebuilt it for every entry).
    int_fields = ('metric', 'ref', 'use', 'mss', 'window', 'irtt')
    for entry in proc_data:
        for key in int_fields:
            if key in entry:
                try:
                    entry[key] = int(entry[key])
                except ValueError:
                    # Non-numeric values become None rather than crashing.
                    entry[key] = None
    return proc_data
def resolve_exp_resp_string_as_list(element):
    """Convert user-given comma-separated data into a list of stripped tokens.

    None and False pass through unchanged; an empty string becomes [].
    Any other value is split on ',' with surrounding whitespace removed
    from each token.
    """
    if element is None or element is False:
        return element
    if element == "":
        return []
    return [token.strip() for token in element.split(",")]
def _split2(string, sep): """Split string in exactly two pieces, return '' for missing pieces.""" parts = string.split(sep, 1) + ["", ""] return parts[0], parts[1]
def bin_to_int(bin_string: str, *, zero: str = "0", one: str = "1") -> int:
    """
    Convert a binary string to an integer.

    The optional keyword arguments give the tokens used for 0 and 1 in the
    input, to support alternate encodings.

    The substitution is done in a single logical pass: split on the `one`
    token, replace `zero` within the pieces, and rejoin with "1".  The
    previous sequential str.replace() calls corrupted the result whenever
    the encoding overlapped the canonical digits (e.g. zero="1", one="0"
    mapped every character to the same digit).

    Raises ValueError if the normalized string is not valid binary.
    """
    normalized = "1".join(piece.replace(zero, "0")
                          for piece in bin_string.split(one))
    return int(normalized, base=2)
def vector_subtract(v, w):
    """Return the elementwise difference v - w.

    Pairs elements with zip(), so the result is truncated to the
    shorter of the two inputs.
    """
    result = []
    for a, b in zip(v, w):
        result.append(a - b)
    return result
def is_edible(cell):
    """Checks if a mushroom is edible.

    Edible iff the low three bits of the cell contain at most one set
    bit (values 0, 1, 2, or 4) -- the classic x & (x - 1) test for
    zero-or-power-of-two.
    """
    bits = cell & 0b111
    return (bits - 1) & bits == 0
def _day_of_year_to_month_day(day_of_year, is_leap):
    """Core logic for turning days into months, for easy testing."""
    # Shift days from March onward forward by the 1-2 days February is short
    # of 30, so month lengths can be treated as a regular 31/30 alternation.
    # Zero for days on or before the end of February (day 59, or 60 in a
    # leap year -- the `60 + is_leap` cutoff).
    february_bump = (2 - is_leap) * (day_of_year >= 60 + is_leap)
    # NOTE(review): 215 looks like the start of the second half-year
    # (early August), where the 31/30 alternation phase flips -- confirm.
    august = day_of_year >= 215
    # With the adjustments above, months alternate 31/30 and so repeat every
    # 61 days; doubling keeps the division integral (each day counts twice).
    month, day = divmod(2 * (day_of_year - 1 + 30 * august + february_bump), 61)
    month += 1 - august
    day //= 2
    day += 1
    return month, day
def xor(_a_, _b_):
    """XOR logical operation.

    True exactly when the two arguments have different truthiness.

    :param _a_: first argument
    :param _b_: second argument
    """
    return bool(_a_) != bool(_b_)
def encode_html(unicode_data: str, encoding: str = 'ascii') -> bytes:
    """Encode text for use in XML or HTML output.

    Characters that cannot be represented in *encoding* are emitted as
    XML numeric character references instead of raising
    UnicodeEncodeError.
    """
    return unicode_data.encode(encoding, errors='xmlcharrefreplace')
def build_call(*args):
    """Create a URL for a call to the OEC API.

    Each positional argument becomes one path segment followed by a
    trailing slash; with no arguments the bare base URL is returned.
    """
    segments = "".join(f"{segment}/" for segment in args)
    return "http://atlas.media.mit.edu/" + segments
def getCallbackFlags(cmdInfo):
    """used parsed data and naming convention to determine which flags are callbacks"""
    commandFlags = []
    if 'flags' not in cmdInfo:
        return commandFlags
    for flag, data in cmdInfo['flags'].items():
        # A flag is a callback if its arg type is 'script'/callable, or
        # its name contains 'command' (e.g. dragCommand, changeCommand).
        if data['args'] in ('script', callable) or 'command' in flag.lower():
            commandFlags.extend((flag, data['shortname']))
    return commandFlags
def helper_for_blocking_services_via_dns(service):
    """Build the CLI arguments that hijack DNS to block *service*.

    DNS queries are redirected to a local proxy on 127.0.0.1:53 and the
    given service name is blocked by that proxy.
    """
    return [
        "-iptables-hijack-dns-to",
        "127.0.0.1:53",
        "-dns-proxy-block",
        service,
    ]
def find_nestings(fields, prefix):
    """Recursively finds all reusable fields in the fields dictionary.

    A field is collected (as prefix + name) when its definition carries a
    'reusable' key; nested 'fields' dictionaries are searched with the
    parent name appended to the prefix.
    """
    found = []
    for name, definition in fields.items():
        if 'reusable' in definition:
            found.append(prefix + name)
        if 'fields' in definition:
            found += find_nestings(definition['fields'], prefix + name + '.')
    return found
def round_scores(student_scores):
    """Round all provided student scores.

    :param student_scores: list - float or int of student exam scores.
    :return: list - new list of scores *rounded* to the nearest integer
             (built-in round(), i.e. banker's rounding on .5), in the
             same order as the input.

    The previous implementation pop()ed every element, which both emptied
    the caller's list (a surprising side effect) and returned the scores
    in reverse order; the input is now left untouched.
    """
    return [round(score) for score in student_scores]
def _replace_small_values(the_list, threshold=0.01, replacement_value=0.0): """ replace small values in a list, this changes the list in place. :param the_list: the list to process. :param threshold: replace values lower than threshold. :param replacement_value: value to replace with. :returns: the number of items not replaced. """ rank = 0 for index, item in enumerate(the_list): if item < threshold: the_list[index] = replacement_value else: rank += 1 return rank
def getStarts(ranges):
    """Get the start index for each range in a sequence.

    Each range must be a (start, end) pair; only the starts are returned.
    """
    return [start for (start, _end) in ranges]
def article_row_from_article_object(article):
    """Build a database row (list) from a news-API article dictionary.

    Args:
        article: dict with at least the keys 'title', 'description',
            'url', 'urlToImage', 'source' (itself a dict with an 'id'),
            'publishedAt', and 'author'. Other keys present in the API
            payload (content, fulltext, sentiment results, ...) are
            ignored here.

    Returns:
        [title, description, url, image_url, source_id,
         datetime_published, authors]
    """
    return [
        article['title'],
        article['description'],
        article['url'],
        article['urlToImage'],
        article['source']['id'],
        article['publishedAt'],
        article['author'],
    ]
def merge_n_reduce(f, arity, data):
    """Reduce data by repeatedly applying f to groups of `arity` items.

    Groups are taken from the front of the list and each partial result
    is queued at the back, so the reduction has an n-ary tree structure.

    :param f: function applied to each group, called as f(*group)
    :param arity: number of elements per group
    :param data: list of items to be reduced (must be non-empty)
    :return: the single reduced value
    """
    while len(data) > 1:
        head, data = data[:arity], data[arity:]
        data = data + [f(*head)]
    return data[0]
def pass_intermediate_value(contents):
    """Relay content from the upload-image component unchanged.

    Acts as an intermediate callback so other callbacks can share the
    data the user uploaded.

    Args:
        contents: content sent in from the upload-image component.

    Returns:
        The same contents, or None when nothing was uploaded.
    """
    if contents is None:
        return None
    return contents
def _formatSeconds(seconds): """ Convert seconds to mm:ss Args: seconds (int): number of seconds Returns: string containing mm:ss """ return "{:02d}:{:02d}".format(int(seconds/60), int(seconds)%60)
def removePunctuation(s):
    """Return a lowercase copy of *s* keeping only a-z letters and spaces.

    Despite the name, digits and every other non-letter character are
    dropped along with punctuation.

    Parameters
    ----------
    s : string
        Song lyrics string

    Returns
    -------
    converted : string
        Filtered, lowercased copy of s.
    """
    keep = set("abcdefghijklmnopqrstuvwxyz ")
    return ''.join(ch for ch in s.lower() if ch in keep)
def dateIsAfter(year1, month1, day1, year2, month2, day2):
    """Returns True if year1-month1-day1 is after year2-month2-day2.
    Otherwise, returns False.

    Relies on Python's lexicographic tuple comparison, which orders by
    year first, then month, then day.
    """
    return (year1, month1, day1) > (year2, month2, day2)