content
stringlengths
42
6.51k
def getkw(kw, name):
    """Remove *name* from dict *kw* and return its value.

    Returns None when the key is absent (matching the original
    get-then-delete behaviour).
    """
    # dict.pop with a default performs the lookup and deletion in one
    # step, replacing the original get() + `in` check + del sequence.
    return kw.pop(name, None)
def angle(val):
    """Convert a 16-bit integer fraction of a full turn to degrees."""
    full_turn = 2 ** 16
    return val * 360. / full_turn
def author_list(authors):
    """Serialize author DB objects into a JSON-ready list of dicts."""
    serialized = []
    for author in authors:
        serialized.append({"name": author.name, "email": author.email})
    return serialized
def end(cave):
    """Return True when *cave* is the terminal marker 'end'."""
    is_end = (cave == 'end')
    return is_end
def get_attr_info(key, convention, normalized):
    """Get information about the MMD fields.

    Input
    =====
    key: str
        MMD element to check (e.g., 'personnel>organisation>acdd')
    convention: str
        e.g., acdd or acdd_ext
    normalized: dict
        a normalized version of the mmd_elements dict (keys are, e.g.,
        'personnel>organisation>acdd' or 'personnel>organisation>separator')

    Returns
    =======
    required: int
        if it is required (minOccurs value, 0 when absent)
    repetition_allowed: str ('yes' or 'no')
        if repetition is allowed (based on maxOccurs)
    repetition_str: str
        a longer string representation for use in the DMH (a comment)
    separator: str
        sign for separating elements that can be repeated (e.g., ',' or ';')
    default:
        a default value for elements that are required but missing in the
        netcdf file
    """
    def lookup(field, missing=''):
        # Each piece of metadata lives under the same key with the
        # convention suffix swapped for the field name; the original had
        # one copy-pasted if/else stanza per field.
        return normalized.get(key.replace(convention, field), missing)

    max_occurs = lookup('maxOccurs')
    repetition_allowed = 'no' if max_occurs in ['0', '1'] else 'yes'
    required = int(lookup('minOccurs', 0))
    separator = lookup('separator')
    default = lookup('default')
    repetition_str = lookup('repetition')
    return required, repetition_allowed, repetition_str, separator, default
def htk_int_to_float(value):
    """Convert HTK time (integer, 100 ns units) to seconds as a float."""
    HTK_UNITS_PER_SECOND = 10000000.0
    return float(value) / HTK_UNITS_PER_SECOND
def get_words(text):
    """Split *text* on whitespace and return the word list."""
    words = text.split()
    return words
def collatz_sequence(n):
    """Return the Collatz sequence starting at positive integer *n*.

    Each term follows from the previous one: halve it when even, otherwise
    triple it and add 1. The conjecture states the sequence always reaches 1
    regardless of the starting value.

    Example:
    >>> collatz_sequence(43)
    [43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
    """
    sequence = [n]
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        sequence.append(n)
    return sequence
def find_block_mesh_template(current_dir):
    """Locate the blockMesh template inside the static template folder.

    Args:
        current_dir (str): Current working directory.

    Returns:
        block_mesh_template_path (str): full path of the blockMesh template.
        foam_files_templates (str): directory holding the template files;
            used later to create cases.
    """
    # NEED TO ADD SOME FUNCTIONALITY FOR IF THE FILE PATH ALREADY EXISTS IN THE SYSTEM
    # NOTE(review): the "//" separators look unusual — presumably tolerated
    # because the consuming APIs collapse duplicate slashes; confirm.
    foam_files_templates = "".join([current_dir, "//", "foam_files_templates//"])
    print("foam_files_templates directory")
    print(foam_files_templates)
    # Edit the blockmesh template:
    block_mesh_template_fname = "blockMeshDict_template"
    # full path:
    block_mesh_template_path = "".join(
        [foam_files_templates, "system//", block_mesh_template_fname])
    print("blockmesh full path:")
    print(block_mesh_template_path)
    return block_mesh_template_path, foam_files_templates
def unprintable(mystring):
    """return only the unprintable characters of a string"""
    from string import printable
    kept = [ch for ch in mystring if ch not in printable]
    return ''.join(kept)
def _mk(bdd, i, l, h): """ mk function, will check to see if a node is already created for a variable, high, low triple. If not, makes one. """ #if high and low are the same if l == h: return l #if a node already exists if (i,l,h) in bdd["h_table"]: return bdd["h_table"][(i,l,h)] #else make a new node u = bdd["u"] + 1 #update BDD bdd["h_table"][(i,l,h)] = u bdd["t_table"][u] = (i,l,h) bdd["u"] = u return u
def find(node, key):
    """Find a node with a given key within a BST."""
    # Balanced BST: O(log N); degenerate chain: O(N).
    # Iterative walk instead of the original tail recursion.
    while node is not None:
        if key == node.key:
            return node
        node = node.left if key < node.key else node.right
    return None
def fgrep(text, term, window=25, with_idx=False, reverse=False):
    """Search a string for a given term. If found, print it with some
    context. Similar to `grep -C 1 term text`. `fgrep` is short for faux
    grep.

    Parameters
    ----------
    text: str
        Text to search.
    term: str
        Term to look for in text.
    window: int
        Number of characters to display before and after the matching term.
    with_idx: bool
        If True, return index as well as string.
    reverse: bool
        If True, reverse search direction (find last match rather than
        first).

    Returns
    -------
    str or tuple[int, str]: The desired term and its surrounding context.
        If the term isn't present, an empty string is returned. If
        with_idx=True, a tuple of (match index, string with text) is
        returned.
    """
    idx = text.rfind(term) if reverse else text.find(term)
    if idx == -1:
        res = ''
    else:
        # Bug fix: end the slice at idx + len(term) + window so the whole
        # matched term plus `window` trailing characters are returned; the
        # old `idx + window` counted the window from the START of the term,
        # truncating long terms and their trailing context.
        res = text[max(idx - window, 0):idx + len(term) + window]
    return (idx, res) if with_idx else res
def decrypt(ciphertext, key, cipher):
    """Decrypt *ciphertext* with the Vigenere cipher.

    :param ciphertext: encrypted text to decrypt with given key and cipher
    :param key: repeated until it matches the length of the ciphertext; each
        key character selects which shifted cipher row to use.
    :param cipher: base alphabet; one shifted copy is built per character.
    :returns: decrypted ciphertext

    See: https://en.wikipedia.org/wiki/Vigen%C3%A8re_cipher

    >>> decrypt("LXFOPVEFRNHR", "LEMON", "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    'ATTACKATDAWN'
    """
    # Build one right-rotated alphabet per character, keyed by its first
    # letter (the full rotation cycle ends back at the original alphabet).
    mapping = {cipher[0]: cipher}
    shifted = cipher
    for _ in range(len(cipher)):
        shifted = shifted[-1] + shifted[:len(shifted) - 1]
        mapping[shifted[0]] = shifted

    # Extend the key cyclically to exactly the ciphertext length.
    if len(key) > 0:
        repeats = len(ciphertext) // len(key) + 1
        extended_key = (key * repeats)[:len(ciphertext)]
    else:
        extended_key = ""

    decrypted = []
    for pos, character in enumerate(ciphertext):
        row = mapping[extended_key[pos]]
        decrypted.append(cipher[row.index(character)])
    return "".join(decrypted)
def greedy_cow_transport(cows, limit=10): """ Uses a greedy heuristic to determine an allocation of cows that attempts to minimize the number of spaceship trips needed to transport all the cows. The returned allocation of cows may or may not be optimal. The greedy heuristic should follow the following method: 1. As long as the current trip can fit another cow, add the largest cow that will fit to the trip 2. Once the trip is full, begin a new trip to transport the remaining cows Does not mutate the given dictionary of cows. Parameters: cows - a dictionary of name (string), weight (int) pairs limit - weight limit of the spaceship (an int) Returns: A list of lists, with each inner list containing the names of cows transported on a particular trip and the overall list containing all the trips """ # created list of cows sorted by weight descending sorted_cows = sorted(cows.items(), key = lambda x:x[1], reverse=True) # init list of chartered shuttles shuttles = [] # keep going to all cows are chartered on a shuttle! while sorted_cows: # init list of cows on current shuttle and set shuttle space limit this_shuttle = [] space_left = limit for i in range(len(sorted_cows)): # take heaviest cow in list (name, weight) = sorted_cows.pop(0) # try to fit on shuttle if weight <= space_left: this_shuttle.append(name) space_left -= weight # else put back into queue else: sorted_cows.append((name, weight)) # if shuttle has cargo, add to transport charter if this_shuttle: shuttles.append(this_shuttle) return shuttles
def _fuel_requirement(mass: int) -> int: """Calculates the fuel requirement for a given mass.""" return mass // 3 - 2
def remove_none(nums):
    """Get a list without Nones. Input `nums` can be list or tuple."""
    filtered = []
    for item in nums:
        if item is not None:
            filtered.append(item)
    return filtered
def get_source_with_id(result):
    """Return a document's `_source` field with its `_id` added.

    Parameters
    ----------
    result : dict
        A document from a set of Elasticsearch search results.

    Returns
    -------
    dict
        The document's `_source` field updated (in place) with the doc's
        `_id`.
    """
    source = result['_source']
    source['_id'] = result['_id']
    return source
def calcStraightLine(x0, y0, x1, y1):
    """Calculate the slope and axis intercept of a straight line through two
    points in an x, y grid.

    :Parameters:
        x0, y0, x1, y1: float
            2 points in an x, y grid
    :Returns:
        m, b: float
            the slope and the axis intercept of the calculated straight line
    """
    m = (y1 - y0) / (x1 - x0)
    b = (x1 * y0 - x0 * y1) / (x1 - x0)
    return m, b
def euler28(n=1001):
    """Solution for problem 28 (sum of diagonals of an n x n number spiral)."""
    # For a level of size s (s > 1), corners are:
    #   s*s, s*s-s+1, s*s-2s+2, s*s-3s+3
    # which sum to 4*s*s - 6*s + 6 (3 <= s <= n). Writing s = 2*j + 3 the
    # corner sum becomes 16*j*j + 36*j + 24. A closed form exists, but this
    # loop is fast enough.
    total = 1
    for j in range(n // 2):
        total += 16 * j * j + 36 * j + 24
    return total
def get_lr(epoch_size, step_size, lr_init):
    """Generate the per-step learning rate, decayed 10x every 10th epoch.

    Args:
        epoch_size(int): total epoch number
        step_size(int): total step number in each epoch
        lr_init(int): initial learning rate

    Returns:
        List, learning rate array
    """
    lrs = []
    lr = lr_init
    for epoch in range(1, epoch_size + 1):
        if epoch % 10 == 0:
            lr *= 0.1
        lrs += [lr] * step_size
    return lrs
def parse_lambda_config(x):
    """
    Parse the configuration of lambda coefficient (for scheduling).
    x = "3"                 # lambda will be a constant equal to x
    x = "0:1,1000:0"        # lambda will start from 1 and linearly decrease
                            # to 0 during the first 1000 iterations
    x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
                            # iterations, then will linearly increase to 1
                            # until iteration 2000
    """
    if isinstance(x, float):
        return x, None
    parts = x.split(',')
    if len(parts) == 1:
        return float(x), None
    pairs = [part.split(':') for part in parts]
    assert all(len(pair) == 2 for pair in pairs)
    assert all(k.isdigit() for k, _ in pairs)
    # iteration breakpoints must be strictly increasing
    assert all(int(pairs[i][0]) < int(pairs[i + 1][0])
               for i in range(len(pairs) - 1))
    schedule = [(int(k), float(v)) for k, v in pairs]
    return schedule[0][1], schedule
def str_or_blank(val) -> str:
    """Return a string or blank for None."""
    return "" if val is None else str(val)
def create_crumb(title, url=None):
    """Build one breadcrumb list item.

    Args:
        title: display text of the crumb.
        url: optional link target; when given the crumb is rendered as a
            link, otherwise as the active (current) item.

    Returns:
        str: HTML fragment for the crumb.
    """
    if url:
        crumb = "> <li><a href='{}'>{}</a></li>".format(url, title)
    else:
        # Bug fix: the active crumb previously closed with a stray </a>
        # even though no <a> was opened; close the <li> instead.
        crumb = '> <li class="active">{}</li>'.format(title)
    return crumb
def ret_int(potential):
    """Utility function to check the input is an int, including negative.

    Returns the parsed int, or None when *potential* cannot be converted.
    """
    try:
        return int(potential)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # and genuine bugs are no longer swallowed.
        return None
def env_chk(val, fw_spec, strict=True, default=None):
    """
    env_chk() lets a property take different values depending on the worker
    machine (e.g. different executable names or scratch directories on
    different machines), following the FWorker env principles of FireWorks.

    A string "val" of the form ">>ENV_KEY<<" is translated to
    fw_spec["_fw_env"][ENV_KEY]; any other value is passed through
    literally. fw_spec["_fw_env"] is in turn set by the FWorker, so a single
    "val" can behave differently on different machines. For more details,
    see: https://materialsproject.github.io/fireworks/worker_tutorial.html

    Args:
        val: any value, with ">><<" notation reserved for special env
            lookup values
        fw_spec: (dict) fw_spec where one can find the _fw_env keys
        strict (bool): if True, errors if env format (>><<) specified but
            cannot be found in fw_spec
        default: if val is None or env cannot be found in non-strict mode,
            return default
    """
    if val is None:
        return default
    is_env_ref = (isinstance(val, str)
                  and val.startswith(">>")
                  and val.endswith("<<"))
    if not is_env_ref:
        return val
    env_key = val[2:-2]
    if strict:
        # missing keys raise KeyError, as callers in strict mode expect
        return fw_spec["_fw_env"][env_key]
    return fw_spec.get("_fw_env", {}).get(env_key, default)
def get_metrics_problem(metrics, problem):
    """
    Function: get_metrics_problem
    Description: From a list of Metrics, returns only those that match with
    problem (or whose problem is "both").
    Input:
        - metrics,list: List of Metric objects
        - problem,str: Type of problem
    Output: List of metric objects that match problem
    """
    matches = []
    for metric in metrics:
        if metric.problem == problem or metric.problem == "both":
            matches.append(metric)
    return matches
def find_cell_index(x, lower, upper, delta):
    """Find the local index i in 1D so that
    lower + i * delta <= x < lower + (i + 1) * delta.

    Arguments
    ---------
    x : float
        The target coordinate; None is returned when x lies outside
        [lower, upper).
    """
    if lower <= x < upper:
        return int((x - lower) // delta)
    return None
def list_product(some_list):
    """Return the product of all the elements in the input list.

    An empty list yields 1 (the multiplicative identity), matching the
    original accumulator behaviour.
    """
    # math.prod replaces the hand-rolled accumulator loop (Python 3.8+).
    from math import prod
    return prod(some_list)
def hex_str(raw: bytes) -> str:
    """Render *raw* as space-separated uppercase hex byte pairs.

    >>> hex_str(b"hello world!")
    '68 65 6C 6C 6F 20 77 6F 72 6C 64 21'
    """
    # Format each byte directly instead of slicing the hex dump string.
    return " ".join("%02X" % byte for byte in raw)
def break_into_sentences(data_in, exclude_toks=('<SOS>',)):
    """
    Break a list of tokens into a list of lists, removing EOS tokens and
    having each list be a standalone sentence.

    :param data_in: long list of tokens, or a whitespace-separated string
    :param exclude_toks: tokens to skip (like SOS; leave EOS alone because
        it marks where each sentence ends). Now an immutable tuple — the
        original used a mutable list default, which is shared across calls.
    :return: list of lists of tokens, each list is a sentence (master list
        is a collection of sentences)
    """
    if isinstance(data_in, str):
        data_in = data_in.split()
    sentences = []
    current = []
    for token in data_in:
        if token in exclude_toks:
            continue
        if token == '<EOS>':
            sentences.append(current)
            current = []
            continue
        current.append(token)
    # flush a trailing sentence that had no closing EOS
    if len(current) > 0:
        sentences.append(current)
    return sentences
def create_meta_value(value):
    """
    This function is used to transform the value output of Dobie to a
    metadata value.

    :param value: provided Dobie value
    :return: metadata value
    """
    # "+" cannot appear in a metadata identifier; spell it out.
    extracted_value = value.replace("+", "plus")
    split_value = extracted_value.split(" ")
    if len(split_value) > 1:
        # Multi-word value: title-case every word except the first and
        # concatenate (lowerCamelCase-style).
        # NOTE(review): the comparison is by word VALUE, not position, so a
        # later word textually equal to the first word is also left
        # untouched — confirm this is intended.
        processed_value = list(map(lambda word: word.title() if word != split_value[0] else word, split_value))
        value_data = "".join(processed_value)
    else:
        value_data = extracted_value
    # Replace "." with the literal "dot".
    # NOTE(review): `char == "." and char == value_data[0]` is only ever
    # true when the string STARTS with "." — and in that case it replaces
    # EVERY dot in the string, not just the leading one. Confirm whether
    # only the leading dot was meant.
    meta_value = "".join(list(map(lambda char: "dot" if char == "." and char == value_data[0] else char, value_data)))
    return meta_value
def getJoinRow(csvReader, joinColumn, joinValue):
    """
    Look for a matching join value in the given csv filereader and return
    the row, or None.
    This assumes we're walking through the join file in one direction, so it
    must be sorted on the join column.
    csvReader iterator must be wrapped with more_itertools.peekable() - see
    http://stackoverflow.com/a/27698681/243392
    And if you use the openCsvReader fn to get the csvReader it will ignore
    blank lines, comments, and the first data line (the header).
    """
    # NOTE(review): `.next()` is the Python 2 iterator protocol; on Python 3
    # a peekable exposes `__next__` and these calls would need
    # `next(csvReader)` — confirm the target interpreter.
    try:
        row = csvReader.peek() # will throw error if eof
        currentValue = row[joinColumn] # if blank row this will throw error also
    except:
        return None
    # Advance past rows whose join value sorts before the target.
    while currentValue < joinValue:
        try:
            csvReader.next() # pop
            row = csvReader.peek()
            currentValue = row[joinColumn]
        except: # eof
            return None
    # Either we are sitting on the matching row (consume and return it) or
    # the sorted file has passed the target value, so no match exists.
    if currentValue==joinValue:
        csvReader.next() # pop
        return row
    else:
        return None
def _parse_snapshots(data, filesystem): """ Parse the output of a ``zfs list`` command (like the one defined by ``_list_snapshots_command`` into a ``list`` of ``bytes`` (the snapshot names only). :param bytes data: The output to parse. :param Filesystem filesystem: The filesystem from which to extract snapshots. If the output includes snapshots for other filesystems (eg siblings or children) they are excluded from the result. :return list: A ``list`` of ``bytes`` corresponding to the names of the snapshots in the output. The order of the list is the same as the order of the snapshots in the data being parsed. """ result = [] for line in data.splitlines(): dataset, snapshot = line.split(b'@', 1) if dataset == filesystem.name: result.append(snapshot) return result
def c2f(celsius):
    """Convert Celsius to Fahrenheit."""
    # ratio computed first, exactly as 9.0/5.0, to keep float rounding
    # identical to the original expression
    ratio = 9.0 / 5.0
    return ratio * celsius + 32
def create_legend_panel(workflow_stat):
    """
    Generates the bottom level legend panel content.

    Returns a static <script> fragment (Protovis JavaScript) that renders
    the legend: one colored dot plus label per entry of bc_data, wrapping
    onto a new row whenever the current row runs out of horizontal space.

    @param workflow_stat the WorkflowInfo object reference (not referenced
        inside the static template itself)
    """
    # The fragment references bc_* globals (bc_data, bc_footerPanelWidth,
    # bc_labelWidth, printTransformationDetails, ...) that must have been
    # defined by previously emitted script sections.
    panel_str ="""
<script type="text/javascript+protovis">
var bc_footerPanel = new pv.Panel()
.width(bc_footerPanelWidth)
.height(bc_footerPanelHeight)
.fillStyle('white');
bc_footerPanel.add(pv.Dot)
.data(bc_data)
.left( function(d){
if(this.index == 0){
bc_xLabelPos = bc_label_padding;
bc_yLabelPos = bc_footerPanelHeight - 15 ;
}else{
if(bc_xLabelPos + bc_labelWidth > bc_w - (bc_label_padding + bc_labelWidth)){
bc_xLabelPos = bc_label_padding;
bc_yLabelPos -=15;
}
else{
bc_xLabelPos += bc_labelWidth;
}
}
return bc_xLabelPos;}
)
.bottom(function(d){
return bc_yLabelPos;}
)
.fillStyle(function(d) d.color)
.strokeStyle(null)
.size(49)
.event("click", function(d) printTransformationDetails(d))
.anchor('right').add(pv.Label)
.textMargin(6)
.textAlign('left')
.text(function(d) d.name);
bc_footerPanel.render();
</script>
"""
    return panel_str
def check_double_biconditional(x_value, y_value, z_value):
    """Compute (X <=> Y) ^ (Y <=> Z) element-wise over three boolean lists.

    Args:
        x_value: a list of booleans
        y_value: a list of booleans
        z_value: a list of booleans

    Returns:
        double_biconditional_list: True at each index where x, y and z are
        all equal (all True or all False), i.e. both biconditionals hold.

    Notes:
        The original expression `x != y or y != x and y != z or z != y`
        depended on `and` binding tighter than `or` and reduced to
        `x != y or y != z`; the chained comparison `x == y == z` states the
        same condition directly and unambiguously.
    """
    return [x == y == z for x, y, z in zip(x_value, y_value, z_value)]
def glob_to_sql(string: str) -> str:
    """Convert glob-like wildcards to SQL wildcards.

    * becomes %
    ? becomes _
    % becomes \\%
    \\\\ remains \\\\
    \\* remains \\*
    \\? remains \\?

    This also adds a leading and trailing %, unless the pattern begins with
    ^ or ends with $.
    """
    # chr(1)/chr(2)/chr(3) are placeholder characters that hide the escaped
    # sequences (\\, \*, \?) from the later * and ? substitutions; they were
    # chosen because they almost certainly never occur in real input.
    # The order of this table matters.
    replacements = (
        (r"\\", chr(1)),
        (r"\*", chr(2)),
        (r"\?", chr(3)),
        (r"%", r"\%"),
        (r"?", "_"),
        (r"*", "%"),
        (chr(1), r"\\"),
        (chr(2), r"\*"),
        (chr(3), r"\?"),
    )
    for old, new in replacements:
        string = string.replace(old, new)
    if string.startswith("^"):
        string = string[1:]
    else:
        string = "%" + string
    if string.endswith("$"):
        string = string[:-1]
    else:
        string = string + "%"
    return string
def convert_to_str(string):
    """Helper function to catch bytes as strings"""
    # exact type check (not isinstance) deliberately preserved
    return string if type(string) is str else bytes.decode(string)
def split_data_list(list_data, num_split):
    """Split *list_data* into *num_split* chunks.

    list_data: list of data items
    returning: list with num_split elements, each a list of data items;
        the last chunk absorbs any remainder.
    """
    total = len(list_data)
    chunk = total // num_split
    print("num_data_all: %d" % total)
    #
    data_split = [list_data[i * chunk:(i + 1) * chunk] for i in range(num_split)]
    #
    # leftover items (when total is not divisible) go to the last chunk
    if num_split * chunk < total:
        data_split[-1].extend(list_data[num_split * chunk:])
    #
    sizes = [len(part) for part in data_split]
    print("list_data split: {}".format(sizes))
    #
    return data_split
def vlan_bitmap_undo(bitmap):
    """convert vlan bitmap to undo bitmap"""
    # default: all-F bitmap of the full 1024-digit width
    if not bitmap:
        return 'F' * 1024
    undo_bits = list('F' * 1024)
    for pos, digit in enumerate(bitmap):
        # invert the 4-bit nibble and render it as one lowercase hex digit
        inverted = (~int(digit, 16)) & 0xF
        undo_bits[pos] = '%x' % inverted
    return ''.join(undo_bits)
def GetPosTM_MSA(posTMList, specialProIdxList):# {{{
    """ Get the beginning and end position of the MSA which has TM helices """
    beg = 9999999999
    end = -1
    for idx, posTM in enumerate(posTMList):
        # proteins flagged as special are ignored
        if idx in specialProIdxList:
            continue
        beg = min(beg, posTM[0][0])
        end = max(end, posTM[-1][1])
    return (beg, end)
def uniquify_in_order(seq):
    """Produces a list with unique elements in the same order as original
    sequence.

    https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-whilst-preserving-order
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def remove_quotes(s, l, t):
    """
    Helper parse action for removing quotation marks from parsed quoted
    strings.

    Example::

        # by default, quotation marks are included in parsed results
        quoted_string.parse_string("'Now is the Winter of our Discontent'")
        # -> ["'Now is the Winter of our Discontent'"]

        # use remove_quotes to strip quotation marks from parsed results
        quoted_string.set_parse_action(remove_quotes)
        quoted_string.parse_string("'Now is the Winter of our Discontent'")
        # -> ["Now is the Winter of our Discontent"]
    """
    quoted = t[0]
    # drop the first and last character (the quote marks)
    return quoted[1:-1]
def escape(s):
    """Escape *s* for embedding inside a JSON string literal.

    Backslashes are escaped first (the original missed them entirely,
    producing invalid JSON for inputs containing a backslash), then double
    quotes and line breaks.
    """
    return s.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
def first_occ(added_symbol, proj):
    """Computes the first time "added_symbol" appears for each transaction
    in "proj".

    Each entry's first element is its transaction index; only the first
    entry of each transaction is kept.
    """
    occurrences = proj.get(added_symbol, [])
    if not occurrences:
        return []
    firsts = [occurrences[0]]
    last_trans_index = occurrences[0][0]
    for entry in occurrences:
        if entry[0] != last_trans_index:
            firsts.append(entry)
            last_trans_index = entry[0]
    return firsts
def get_nonce_bytes(n):
    """BOLT 8 requires the nonce to be 12 bytes: 4 leading zero bytes
    followed by an 8-byte little-endian encoded 64-bit integer.
    """
    prefix = bytes(4)
    return prefix + n.to_bytes(8, 'little')
def clean_latex_name(label):
    """ Convert possible latex expression into valid variable name """
    if not isinstance(label, str):
        label = str(label)
    # strip backslashes and braces, then map '^' and ',' to underscores
    for old, new in (('\\', ''), ('{', ''), ('}', ''), ('^', '_'), (',', '_')):
        label = label.replace(old, new)
    return label
def total_occurences(s1, s2, ch):
    """(str, str, str) -> int

    Precondition: len(ch) == 1

    Return the total number of times ch appears in s1 and s2.

    >>> total_occurences('red', 'blue', 'u')
    1
    """
    # counting each string separately avoids building the concatenation;
    # equivalent for single-character ch (see precondition)
    return s1.count(ch) + s2.count(ch)
def sum_sum(lists):
    """Returns total sum for list of lists."""
    total = 0
    for sublist in lists:
        total += sum(sublist)
    return total
def spike_lmax(S, Q):
    """Maximum spike given a perturbation"""
    # same arithmetic, operands named for readability
    S2 = S * S
    inverse_Q = 1.0 / Q
    inverse_S2 = 1.0 / S2
    return (inverse_Q + S2) * (1 + inverse_S2)
def extractNew(data: str):
    """
    Data -> just full type (e.g: new MyClass() -> MyClass).

    Scans Java-like source text for `new Foo(` expressions and
    `extends Foo` clauses, returning the set of referenced type names.
    """
    assert type(data) == str
    news = set()
    # NOTE(review): the scan matches the raw substrings 'new'/'extends'
    # anywhere, with no word-boundary check — identifiers like 'renew' or
    # 'newValue' would also trigger a capture; confirm inputs make this
    # safe.
    for i, c in enumerate(data):
        # 'extends' clause: capture text up to a newline or '{'
        if data[i:i+len('extends')] == 'extends':
            rest = []
            for j in range(i, len(data)):
                if data[j] == '\n':
                    break
                if data[j] == '{':
                    break
                rest.append(data[j])
            rest = "".join(rest)
            news.add(rest[len('extends'):].strip())
        # 'new' expression: capture text up to (and including) the opening
        # parenthesis
        if data[i:i+len('new')] == 'new':
            rest = []
            for j in range(i, len(data)):
                if data[j] == '\n':
                    break
                rest.append(data[j])
                if data[j] == '(':
                    break
            rest = "".join(rest)
            # only count the capture if a '(' actually terminated the scan
            if rest[-1:] == '(':
                news.add(rest[len('new'):-1].strip())
    return news
def buildEventString(events): """ Function to produce a string representing the event history of a single binary for quick readability. IN: events (list of tuples): events output from getEventHistory() OUT: eventString (string): string representing the event history of the binary MT strings look like: P>S, P<S, or P=S where P is primary type, S is secondary type, and >, < is RLOF (1->2 or 1<-2) or = for CEE SN strings look like: P*SR for star1 the SN progenitor,or R*SP for star2 the SN progenitor, where P is progenitor type, R is remnant type, S is state (I for intact, U for unbound) Event strings for the same seed are separated by the undesrcore character ('_') """ # Empty event if len(events) == 0: return 'NA' eventStr = '' # event string for this star for event in events: if event[0] == 'MT': # MT event eventStr += str(event[2]) # primary stellar type eventStr += '=' if event[6] else ('>' if event[4] else '<') # event type: CEE, RLOF 1->2, RLOF 2->1 eventStr += str(event[3]) # secondary stellar type else: # assume SN event (until other event types are added...) eventStr += str(event[2]) if event[4] == 1 else str(event[3]) # Progenitor or Remnant depending upon which star is the SN eventStr += '*U' if event[5] else '*I' # unbound or intact eventStr += str(event[3]) if event[4] == 1 else str(event[2]) # Progenitor or Remnant depending upon which star is the SN eventStr += '_' # event separator return eventStr[:-1] # return event string for this star (pop the last underscore first)
def _diff_env(a, b): """Return difference of two environments dict""" seta = set([(k, a[k]) for k in a]) setb = set([(k, b[k]) for k in b]) return dict(seta - setb), dict(setb - seta)
def solution(capacity, items): # O(M * N)
    """
    Given the capacity of the knapsack and items specified by weights and
    values, return the maximum summarized value of the items that can be
    fit in the knapsack.

    Example:
    capacity = 5, items(value, weight) = [(60, 5), (50, 3), (70, 4), (30, 2)]
    result = 80 (items valued 50 and 30 can both be fit in the knapsack)

    >>> solution(5, [(60, 5), (50, 3), (70, 4), (30, 2)])
    80
    """
    # result[i] holds a (best_value, used_weight) pair for a knapsack of
    # size i; tracking the weight actually used is what distinguishes this
    # from the textbook value-only DP.
    result = [(0, 0)] * (capacity + 1) # O(1)
    for value, weight in items: # O(N)
        if weight > capacity: # O(1)
            continue # O(1)
        for i in range(1, len(result)): # O(M)
            # weight the sack would carry if this item joined the best
            # solution of the remaining capacity (or what it already holds)
            calc_weight = max(weight + result[i - weight][1], \
                result[i][1]) # O(1)
            if calc_weight <= i: # O(1)
                result[i] = (
                    max(value + result[i - weight][0], result[i][0]),
                    calc_weight
                ) # O(1)
        # NOTE(review): iterating capacities in increasing order while
        # reading result[i - weight] from the same pass normally gives
        # UNBOUNDED knapsack semantics (an item reused many times); the
        # calc_weight bookkeeping appears intended to prevent reuse —
        # confirm on adversarial inputs before trusting 0/1 behaviour.
    return result[capacity][0] # O(1)
def IsCustomMetadataHeader(header):
    """Returns true if header (which must be lowercase) is a custom header."""
    # str.startswith accepts a tuple of prefixes, collapsing the `or` chain
    return header.startswith(('x-goog-meta-', 'x-amz-meta-'))
def subdivide(R, C, max_size = 36):
    """splits array into subarrays of manageable size

    Recursively halves the (0,0)-(R,C) rectangle along its longer axis
    until every piece has at most `max_size` cells; returns the pieces as
    (y1, x1, y2, x2) inclusive corner tuples.
    """
    def size(y1, x1, y2, x2):
        # number of cells in the inclusive rectangle
        return (y2 - y1 + 1) * (x2 - x1 + 1)
    def helper(y1, x1, y2, x2):
        nonlocal max_size
        # small enough: emit as a single piece
        if size(y1, x1, y2, x2) <= max_size:
            return [(y1, x1, y2, x2)]
        # divide along horizontal
        if y2 - y1 > x2 - x1:
            y = (y1 + y2) // 2
            # NOTE(review): the parity check decides which half keeps the
            # split row; the two branches place the boundary row on
            # different sides (y vs y-1 / y+1) — confirm the intended
            # overlap/adjacency semantics before altering.
            if (y - y1) & 1:
                return helper(y1, x1, y, x2) + helper(min(y+1, y2), x1, y2, x2)
            return helper(y1, x1, max(y-1, y1), x2) + helper(y, x1, y2, x2)
        #divide along vertical
        x = (x1 + x2) // 2
        return helper(y1, x1, y2, x) + helper(y1, min(x+1, x2), y2, x2)
    return helper(0, 0, R, C)
def check_valid(word, letters):
    """Take a word as a dictionary and check that it is valid.

    :param word: Word as a dictionary (char -> occurrence count)
    :param letters: Letters as a dictionary (char -> available count)
    :returns: False if not valid, otherwise len(letters) minus the total
        number of characters the word consumes
    """
    remaining = len(letters)
    for char, count in word.items():
        # every character must exist in letters with enough occurrences
        if char not in letters or count > letters[char]:
            return False
        remaining -= count
    return remaining
def check_iterable_item_type(iter_obj):
    """
    Check if all items within an iterable are the same type.

    Args:
        iter_obj: iterable object

    Returns:
        iter_type: type of item contained within the iterable. If the
        iterable holds multiple types, a boolean False is returned instead.

    References:
        http://stackoverflow.com/questions/13252333/python-check-if-all-elements-of-a-list-are-the-same-type
    """
    iterator = iter(iter_obj)
    first_type = type(next(iterator))
    for item in iterator:
        if type(item) is not first_type:
            return False
    return first_type
def get_boresights(bore_files):
    """
    Extract data from five-point boresight files.

    @param bore_files : Full paths to the boresight files.
    @type bore_files : list

    @return: (dict) keyed by the column headers found on the first line of
      each file; every value is a list of strings accumulated across all
      files. The keys are::
        - X_BW: measured beamwidth in cross-elevation or cross-declination
        - DOY: day of year
        - UTC: coordinated universal time
        - SOURCE: source name
        - HA: hour angle
        - DEC: declination
        - AZ: azimuth
        - EL: elevation
        - AX/HXPOS: azimuth/hour angle map position
        - EL/DEPOS: elevation/declination map position
        - FREQ: frequency
        - POL: polarization
        - ELOFF: elevation offset
        - XELOFF: cross-elevation offset
        - DECOFF: declination offset
        - XDECOFF: cross-declination offset
        - XPOS, YPOS, ZPOS: subreflector X/Y/Z position
        - YOFF, ZOFF: subreflector Y/Z offset
        - BASE+ / BASE-: baseline in the scan +/- direction
        - BL+ / BL-: baseline values
        - DELTA+ / DELTA(0) / DELTA-: delta values
        - RMS, ERR, POS, X_ERR, NUMPTS, BW, ON(1), ON(2): fit statistics
    """
    print("Bore files:",bore_files)
    data = {}
    for bore_file in bore_files:
        print("Bore file:",bore_file)
        # NOTE(review): the handle is not closed if a read fails; a `with`
        # block would be safer.
        fd = open(bore_file,'r')
        # The first line has the keys for the columns
        keys = fd.readline().strip().split()
        for key in keys:
            if key not in data:
                data[key] = []
        # The remaining lines are data
        lines = fd.readlines()
        fd.close()
        for line in lines:
            l = line.strip().split()
            if len(l) == 0:
                # NOTE(review): `pass` does not skip the row — presumably
                # `continue` was intended; harmless only because the loop
                # below iterates zero times for an empty row.
                pass
            # append each column value under its header key
            for item in range(len(l)):
                data[keys[item]].append(l[item])
    return data
def minimax_jit(maxing_player, next_depth, worth, alpha, beta):
    """ jit version of the minimax logic. """
    # single branch instead of two conditional expressions: the maximizing
    # player raises worth/alpha, the minimizing player lowers worth/beta
    if maxing_player:
        worth = max(next_depth, worth)
        alpha = max(alpha, worth)
    else:
        worth = min(next_depth, worth)
        beta = min(beta, worth)
    return beta, alpha, worth
def is_switchport_default(existing):
    """Determines if switchport has a default config based on mode

    Args:
        existing (dict): existing switchport configuration from Ansible mod

    Returns:
        boolean: True if access port and access vlan = 1, or True if trunk
        port and native = 1 and trunk vlans = 1-4094; else False

    Note:
        Specific for Ansible module(s). Not to be called otherwise.
    """
    mode = existing['mode']
    if mode == 'access':
        return existing['access_vlan'] == '1'
    if mode == 'trunk':
        return (existing['native_vlan'] == '1'
                and existing['trunk_vlans'] == '1-4094')
    return False
def format_arg_value(arg_val):
    """ Return a string representing a (name, value) pair.

    Examples:
    >>> format_arg_value(('x', (1, 2, 3)))
    'x=(1, 2, 3)'
    """
    name, value = arg_val
    # str() for the name and repr() for the value, matching "%s=%r".
    return str(name) + "=" + repr(value)
def denormalize(column, startvalue, endvalue):
    """ converts [0:1] back with given start and endvalue """
    # NOTE(review): startvalue must be strictly positive here; zero is
    # rejected just like negatives -- preserved as-is.
    if not startvalue > 0:
        raise ValueError("start and endval must be given as absolute times")
    if endvalue < startvalue:
        raise ValueError("start and endval must be given, endval must be larger")
    span = endvalue - startvalue
    return [normed * span + startvalue for normed in column]
def deal_new_stack(cards):
    """Reverse *cards* in place and return the same list object."""
    cards[:] = cards[::-1]
    return cards
def classify_platform(os_string):
    """
    :type os_string: str
    :return: str
    """
    os_string = os_string.strip()
    # Exact platform name, or the vendor prefix used in raw strings.
    for platform, prefix in (("Windows", "IBM"),
                             ("Mac", "APL"),
                             ("Linux", "LIN")):
        if os_string == platform or os_string.startswith(prefix):
            return platform
    return "Dafuq?"
def toX4(x, y=None):
    """ This function is used to load value on Plottable.View
    Calculate x^(4)

    :param x: float value
    """
    # Same left-to-right association as x*x*x*x, so float results are
    # bit-identical.
    cube = (x * x) * x
    return cube * x
def urljoin(*args):
    """
    Joins given arguments into an url. Trailing but not leading slashes are
    stripped for each argument.
    """
    trimmed = [str(part).rstrip('/') for part in args]
    url = "/".join(trimmed)
    # NOTE(review): checks for '/' *anywhere* in the last/first raw
    # argument, not just at the edges, and the final '//' collapse also
    # mangles scheme prefixes like 'http://' -- behavior preserved as-is.
    if '/' in args[-1]:
        url += '/'
    if '/' not in args[0]:
        url = '/' + url
    return url.replace('//', '/')
def check_ssl(aba, bab):
    """ checks if any matches of valid aba & bab are found """
    # An aba/bab pair matches when the outer character of one equals the
    # inner character of the other (e.g. 'aba' vs 'bab').
    return any(a[0] == b[1] and a[1] == b[0] for a in aba for b in bab)
def get_func_name(func):
    """Get name of a function.

    Parameters
    ----------
    func: Function
        The input function.

    Returns
    -------
    name: str
        The function name.
    """
    try:
        # Legacy Python 2 attribute, kept for compatibility.
        return func.func_name
    except AttributeError:
        return func.__qualname__
def validate(data_dict, required, optional, filter_unknown_fields=False):
    """Validate *data_dict* against required and optional field names.

    Make sure all required fields are present and all other fields are
    optional. If an unknown field is found and filter_unknown_fields is
    False, return an error. Otherwise, just filter the field out.

    :param data_dict: mapping of field name -> value to validate
    :param required: field names that must be present
    :param optional: field names that may be present
    :param filter_unknown_fields: when True, silently drop unknown keys
        instead of returning an error message
    :return: dict of the accepted fields on success, or an error string
    """
    final = {}
    for key in required:
        # Membership test (not truthiness) so falsy values such as 0, ''
        # or False still count as present; the previous
        # `not data_dict.get(key)` wrongly reported them as missing.
        if key not in data_dict:
            return "missing required key: " + key
    for key in data_dict:
        if key in required or key in optional:
            final[key] = data_dict[key]
        elif not filter_unknown_fields:
            return "unknown key: " + key
    return final
def calculate_heat_index(temp, hum):
    """
    :param temp: temperature in degrees fahrenheit
    :param hum: relative humidity as an integer 0-100
    :return: heat index
    """
    t = float(temp)
    h = hum
    # Regression coefficients for the heat-index polynomial.
    c1, c2, c3, c4, c5, c6, c7, c8, c9 = (
        -42.379, 2.04901523, 10.14333127, -0.22475541,
        -6.83783e-3, -5.481717e-2, 1.22874e-3, 8.5282e-4, -0.00000199)
    # Terms are accumulated in the same left-to-right order as the
    # original expression so the float result is bit-identical.
    terms = (c1,
             c2 * t,
             c3 * h,
             c4 * t * h,
             c5 * (t ** 2),
             c6 * (h ** 2),
             c7 * (t ** 2) * h,
             c8 * t * (h ** 2),
             c9 * (t ** 2) * (h ** 2))
    total = 0.0
    for term in terms:
        total += term
    return int(round(total))
def format_board_time(dt):
    """ Format a time for the big board.

    Returns a blank-padded 12-hour time like ' 5:03 EST' ('' when *dt*
    is falsy).  Implemented with portable arithmetic instead of the
    previous strftime '%l', which is a glibc-only extension and fails on
    Windows and other libcs.

    NOTE(review): the ' EST' suffix is hard-coded; presumably callers
    always supply Eastern times -- confirm before reusing elsewhere.
    """
    if not dt:
        return ''
    hour12 = dt.hour % 12 or 12  # 0 and 12 both display as 12
    # '{:>2}' reproduces %l's blank padding for single-digit hours.
    return '{:>2}:{:02d}'.format(hour12, dt.minute) + ' EST'
def is_edit_mode(request):
    """
    Return whether edit mode is enabled;
    output is wrapped in ``<div>`` elements with metadata for frontend editing.
    """
    try:
        return request._fluent_contents_edit_mode
    except AttributeError:
        # Flag not set on the request: edit mode is off.
        return False
def _get_target_columns(metadata): """Given ModelPipeline metadata, construct the list of columns output by the model's prediction method. """ target_cols = [] if metadata['model']['type'] == 'classification': # Deal with multilabel models, if necessary if len(metadata['data']['target_columns']) > 1: for i, col in enumerate(metadata['data']['target_columns']): for cls in metadata['data']['class_names'][i]: target_cols.append(col + '_' + str(cls)) else: col = metadata['data']['target_columns'][0] for cls in metadata['data']['class_names']: target_cols.append(col + '_' + str(cls)) else: for col in metadata['data']['target_columns']: target_cols.append(col) return target_cols
def list_data(args, data):
    """List all servers and files associated with this project."""
    servers = data["remotes"]
    if servers:
        print("Servers:")
        for server in servers:
            # When name and location coincide, show only the location.
            if server["name"] == server["location"]:
                print(server["user"] + "@" + server["location"])
            else:
                print(server["user"] + "@" + server["name"]
                      + " (" + server["location"] + ")")
    else:
        print("No servers added")
    print("Included files and directories:")
    print(data["file"] + ".py")
    if data["files"]:
        print("\n".join(data["files"]))
    return data
def NewerDataThanExtrapolation(TopLevelInputDir, TopLevelOutputDir, SubdirectoriesAndDataFiles):
    """Find newer data than extrapolation."""
    import os.path
    newer = []
    for subdirectory, data_file in SubdirectoriesAndDataFiles:
        finished_file = "{}/{}/.finished_{}".format(
            TopLevelOutputDir, subdirectory, data_file)
        # No marker file means this run never finished; nothing to compare.
        if not os.path.exists(finished_file):
            continue
        time_finished = os.path.getmtime(finished_file)
        time_metadata = os.path.getmtime(
            f"{TopLevelInputDir}/{subdirectory}/metadata.txt")
        time_data = os.path.getmtime(
            f"{TopLevelInputDir}/{subdirectory}/{data_file}")
        # Either the data or its metadata changed after the run finished.
        if time_data > time_finished or time_metadata > time_finished:
            newer.append([subdirectory, data_file])
    return newer
def getValue(valOb):
    """ Return the value (for toi attributes it could be the object) of an
    attribute.

    Arguments: The value object
    Returns:   The wrapped .value when present, otherwise the object itself
    """
    # getattr with a default is equivalent to the hasattr check.
    return getattr(valOb, 'value', valOb)
def is_suspicious(transaction: dict) -> bool:
    """Determine whether a transaction is suspicious."""
    # Anything at or above the threshold is flagged.
    suspicious_threshold = 900
    return transaction["amount"] >= suspicious_threshold
def find(haystack, needle):
    """
    >>> find("ll", "hello")
    -1
    >>> find("", "")
    0
    >>> find("hello", "ll")
    2
    >>> find("aaaaabba", "bba")
    5
    >>> find("bbaaaaaa", "bba")
    0
    >>> find("aaaaa", "bba")
    -1
    """
    h_len = len(haystack)
    n_len = len(needle)
    if h_len < n_len:
        return -1
    if h_len == 0 and n_len == 0:
        return 0
    # Slide a window of needle's length over the haystack.
    for start in range(h_len - n_len + 1):
        if haystack[start:start + n_len] == needle:
            return start
    return -1
def _get_color_end_tex(entity_id: str) -> str: """ A message is added after an entity is colorized to assist the pipeline with error recovery. This allows the pipeline to scan the output of the TeX compiler to detect which entities were successfully colorized before errors were encountered. """ return rf"\scholarrevertcolor{{}}\message{{S2: Colorized entity '{entity_id}'.}}"
def _get_inner_type(typestr): """ Given a str like 'org.apache...ReversedType(LongType)', return just 'LongType' """ first_paren = typestr.find('(') return typestr[first_paren + 1:-1]
def _tuple2list(tupl): """Iteratively converts nested tuple to nested list. Parameters ---------- tupl : tuple The tuple to be converted. Returns ------- list The converted list of lists. """ return list((_tuple2list(x) if isinstance(x, tuple) else x for x in tupl))
def is_unique_chars_v1(str):
    """Return True when no character occurs more than once in *str*.

    Let N be the string length: building the set is O(N) time and O(N)
    space; the two len() calls are O(1), so the whole check is O(N).
    (The intermediate list the previous version built was unnecessary.)

    Note: the parameter shadows the builtin ``str``; the name is kept
    unchanged for interface compatibility with existing keyword callers.
    """
    return len(set(str)) == len(str)
def dicts_equal(dictionary_one, dictionary_two):
    """
    Return True if all keys and values are the same between two
    dictionaries.
    """
    # Check both directions so extra keys on either side are detected.
    for key in dictionary_one:
        if key not in dictionary_two or dictionary_one[key] != dictionary_two[key]:
            return False
    for key in dictionary_two:
        if key not in dictionary_one or dictionary_one[key] != dictionary_two[key]:
            return False
    return True
def formatTimeString(seconds:int) -> str:
    """
    Convert a duration in seconds to the string
    "MM minute(s) SS seconds", with both fields zero-padded to two digits
    (e.g. 125 -> "02 minute(s) 05 seconds").
    """
    # divmod splits the total into whole minutes and leftover seconds.
    m, s = divmod(seconds, 60)
    return "{:02.0f} minute(s) {:02.0f} seconds".format(m, s)
def extract_capital_letters(x):
    """ Extract capital letters from string """
    try:
        capitals = [character for character in x if character.isupper()]
        return "".join(capitals)
    except Exception as e:
        # Best-effort: any failure (e.g. non-iterable input) yields "".
        print(f"Exception raised:\n{e}")
        return ""
def adj_list_to_edges(adj_list):
    """
    Turns an adjecency list in a list of edges (implemented as a list).

    Input:
        - adj_list : a dict of a set of weighted edges
    Output:
        - edges : a list of weighted edges (e.g. (0.7, 'A', 'B') for an
          edge from node A to node B with weigth 0.7)
    """
    return [(weight, neighbour, vertex)
            for vertex, neighbours in adj_list.items()
            for weight, neighbour in neighbours]
def get_device_sleep_period(settings: dict) -> int:
    """Return the device sleep period in seconds or 0 for non sleeping
    devices."""
    sleep_mode = settings.get("sleep_mode", False)
    if not sleep_mode:
        return 0
    minutes = settings["sleep_mode"]["period"]
    if settings["sleep_mode"]["unit"] == "h":
        minutes *= 60  # hours to minutes
    return minutes * 60  # minutes to seconds
def get_strongly_connected_components(edges, num_vertex):
    """
    Kosaraju's algorithm, implemented with explicit stacks.

    edges: {v: [v]} -- adjacency mapping; vertices are the ints
    0..num_vertex-1.  NOTE(review): ``edges[i]`` is indexed for every
    vertex in the first pass, so presumably every vertex has an entry
    (or ``edges`` is a defaultdict) -- confirm with callers.

    Returns a list of components, each a list of vertex indices.

    The iterative DFS uses a trick: for each vertex i, ``~i`` (a negative
    number) is pushed *below* i on the stack as a "post-visit" marker, so
    when ``~i`` is popped all of i's descendants have been processed.
    ``done`` tracks per-vertex state: 0 = unvisited, 1 = discovered,
    2 = finished.
    """
    from collections import defaultdict
    # Build the transposed graph for the second pass.
    reverse_edges = defaultdict(list)
    for v1 in edges:
        for v2 in edges[v1]:
            reverse_edges[v2].append(v1)
    # --- Pass 1: DFS on the original graph, recording finish order. ---
    terminate_order = []
    done = [0] * num_vertex  # 0 -> 1 -> 2
    count = 0  # number of finished vertices (not used further)
    for i0 in range(num_vertex):
        if done[i0]:
            continue
        queue = [~i0, i0]  # dfs
        while queue:
            i = queue.pop()
            if i < 0:
                # Post-visit marker: vertex ~i is now finished.
                if done[~i] == 2:
                    continue
                done[~i] = 2
                terminate_order.append(~i)
                count += 1
                continue
            if i >= 0:
                if done[i]:
                    continue
                done[i] = 1
                for j in edges[i]:
                    if done[j]:
                        continue
                    # Push the marker first so it is popped after j's subtree.
                    queue.append(~j)
                    queue.append(j)
    # --- Pass 2: DFS on the transposed graph in reverse finish order. ---
    # Each tree found here is one strongly connected component.
    done = [0] * num_vertex
    result = []
    for i0 in terminate_order[::-1]:
        if done[i0]:
            continue
        component = []
        queue = [~i0, i0]
        while queue:
            i = queue.pop()
            if i < 0:
                if done[~i] == 2:
                    continue
                done[~i] = 2
                component.append(~i)
                continue
            if i >= 0:
                if done[i]:
                    continue
                done[i] = 1
                for j in reverse_edges[i]:
                    if done[j]:
                        continue
                    queue.append(~j)
                    queue.append(j)
        result.append(component)
    return result
def prod2(*args):
    """
    >>> prod2( 1, 2, 3, 4 )
    24
    >>> prod2(*range(1, 10))
    362880
    """
    # Multiplicative fold starting from the identity element 1.
    result = 1
    for factor in args:
        result = result * factor
    return result
def remove_shard_path(path):
    """
    Remove the shard suffix from task, input and output names (if it's
    there).  E.g. {..}/{taskName}/shard-0 => {..}/{taskName}
    """
    if not path:
        return None
    # partition() leaves the path untouched when '/shard-' is absent.
    prefix, _, _ = path.partition('/shard-')
    return prefix
def largeGroupPositions(S):
    """
    :type S: str
    :rtype: List[List[int]]
    """
    # Scan runs of identical characters; record [start, end] for runs of
    # length >= 3.
    groups = []
    run_start = 0
    for pos in range(1, len(S) + 1):
        if pos == len(S) or S[pos] != S[run_start]:
            if pos - run_start >= 3:
                groups.append([run_start, pos - 1])
            run_start = pos
    return sorted(groups, key=lambda group: group[0])
def block_number(row, column):
    """
    Determines the block number in which the given row and column numbers
    intersect in sudoku.

    args:
        - row - Row number (0-8)
        - column - Column number (0-8)
    returns:
        Block number (0-8, numbered row-major), or None when the
        coordinates fall outside the 9x9 grid (matching the previous
        lookup-table behaviour of falling through without a match).
    """
    if 0 <= row <= 8 and 0 <= column <= 8:
        # Rows and columns group in threes; blocks are numbered row-major,
        # replacing the previous O(81) string-matrix search.
        return (row // 3) * 3 + column // 3
    return None
def merge_hsbk(base, change):
    """Copy change on top of base, except when None."""
    if change is None:
        return None
    merged = []
    for base_value, change_value in zip(base, change):
        # A None in change means "keep the base value".
        merged.append(base_value if change_value is None else change_value)
    return merged
def _extract_license_outliers(license_service_output): """Extract license outliers. This helper function extracts license outliers from the given output of license analysis REST service. :param license_service_output: output of license analysis REST service :return: list of license outlier packages """ outliers = [] if not license_service_output: return outliers outlier_packages = license_service_output.get('outlier_packages', {}) for pkg in outlier_packages.keys(): outliers.append({ 'package': pkg, 'license': outlier_packages.get(pkg, 'Unknown') }) return outliers
def pattern_to_regex(pattern):
    """
    Convert the CODEOWNERS path pattern into a regular expression string.
    """
    orig_pattern = pattern  # kept for the error message below
    # Replicates the logic from normalize_pattern function in Gitlab
    # ee/lib/gitlab/code_owners/file.rb:
    if not pattern.startswith('/'):
        pattern = '/**/' + pattern
    if pattern.endswith('/'):
        pattern += '**/*'
    # Convert the glob pattern into a regular expression.  First replace
    # glob tokens with intermediate markers; order matters ('**/' before
    # '**' before '*'), and a bare '**' not followed by '/' is invalid.
    for glob_token, marker in (('**/', ':REGLOB:'),
                               ('**', ':INVALID:'),
                               ('*', ':GLOB:'),
                               ('.', ':DOT:'),
                               ('?', ':ANY:')):
        pattern = pattern.replace(glob_token, marker)
    if ':INVALID:' in pattern:
        raise ValueError("Likely invalid pattern '{}': '**' should be followed by '/'".format(orig_pattern))
    # Then expand the markers into regex fragments.
    for marker, regex_piece in ((':REGLOB:', '(?:.*/)?'),
                                (':GLOB:', '[^/]*'),
                                (':DOT:', '[.]'),
                                (':ANY:', '.')):
        pattern = pattern.replace(marker, regex_piece)
    re_pattern = pattern + '$'
    if re_pattern.startswith('/'):
        re_pattern = '^' + re_pattern
    return re_pattern
def add_loc_offset(v_d, l, d_s):
    """Compute the real location from the default boxes location v_d and
    the scaled offset value l, learned by the network.

    Parameters
    ----------
    v_d : float
        location of the default box, either v_x or v_y
    l : float
        scaled offset value as computed by the network
    d_s : float
        default box size value, either v_w or v_h

    Returns
    -------
    float
        v_g, (if the network is correct)
    """
    scaled_offset = l * d_s
    return v_d + scaled_offset
def remainder(x, y):
    """Difference between x and the closest integer multiple of y.

    Return x - n*y where n*y is the closest integer multiple of y.
    In the case where x is exactly halfway between two multiples of y,
    the nearest even value of n is used. The result is always exact.
    """
    # NOTE(review): this mirrors math_remainder in CPython's
    # Modules/mathmodule.c -- presumably a backport for interpreters
    # lacking math.remainder (added in Python 3.7); confirm before
    # replacing it with the stdlib call.  The exact order of the float
    # operations below is what makes the result exact; do not reorder.
    from math import copysign, fabs, fmod, isfinite, isinf, isnan, nan
    try:
        x = float(x)
    except ValueError:
        raise TypeError("must be real number, not %s" % (type(x).__name__, ))
    y = float(y)
    # Deal with most common case first.
    if isfinite(x) and isfinite(y):
        if y == 0.0:
            # return nan
            # Merging the logic from math_2 in CPython's mathmodule.c
            # nan returned and x and y both not nan -> domain error
            raise ValueError("math domain error")
        absx = fabs(x)
        absy = fabs(y)
        m = fmod(absx, absy)
        # Warning: some subtlety here. What we *want* to know at this point is
        # whether the remainder m is less than, equal to, or greater than half
        # of absy. However, we can't do that comparison directly because we
        # can't be sure that 0.5*absy is representable (the mutiplication
        # might incur precision loss due to underflow). So instead we compare
        # m with the complement c = absy - m: m < 0.5*absy if and only if
        # m < c, and so on. The catch is that absy - m might also not be
        # representable, but it turns out that it doesn't matter:
        # - if m > 0.5*absy then absy - m is exactly representable, by
        #   Sterbenz's lemma, so m > c
        # - if m == 0.5*absy then again absy - m is exactly representable
        #   and m == c
        # - if m < 0.5*absy then either (i) 0.5*absy is exactly representable,
        #   in which case 0.5*absy < absy - m, so 0.5*absy <= c and hence
        #   m < c, or (ii) absy is tiny, either subnormal or in the lowest
        #   normal binade. Then absy - m is exactly representable and again
        #   m < c.
        c = absy - m
        if m < c:
            r = m
        elif m > c:
            r = -c
        else:
            # Here absx is exactly halfway between two multiples of absy,
            # and we need to choose the even multiple. x now has the form
            #     absx = n * absy + m
            # for some integer n (recalling that m = 0.5*absy at this point).
            # If n is even we want to return m; if n is odd, we need to
            # return -m.
            # So
            #     0.5 * (absx - m) = (n/2) * absy
            # and now reducing modulo absy gives us:
            #     fmod(0.5 * (absx - m), absy) = m, if n is odd
            #                                    0, if n is even
            # Now m - 2.0 * fmod(...) gives the desired result: m
            # if n is even, -m if m is odd.
            # Note that all steps in fmod(0.5 * (absx - m), absy)
            # will be computed exactly, with no rounding error
            # introduced.
            assert m == c
            r = m - 2.0 * fmod(0.5 * (absx - m), absy)
        # copysign restores x's sign (also handles signed zero correctly).
        return copysign(1.0, x) * r
    # Special values.
    if isnan(x):
        return x
    if isnan(y):
        return y
    if isinf(x):
        # return nan
        # Merging the logic from math_2 in CPython's mathmodule.c
        # nan returned and x and y both not nan -> domain error
        raise ValueError("math domain error")
    # Only remaining case: x finite, y infinite -> x is already the
    # remainder.
    assert isinf(y)
    return x
def sanitize_branch_name_for_rpm(branch_name: str) -> str:
    """
    rpm is picky about release:
    hates "/" - it's an error
    also prints a warning for "-"
    """
    offenders = "!@#$%^&*()+={[}]|\\'\":;<,>/?~`"
    # One translation table: delete every offender, map '-' to '.'.
    table = str.maketrans("-", ".", offenders)
    return branch_name.translate(table)