content
stringlengths
42
6.51k
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> bool:
    """Return True when `same` padding for the given args can be computed statically."""
    if stride != 1:
        return False
    # Effective receptive span must be even for a symmetric static pad.
    return (dilation * (kernel_size - 1)) % 2 == 0
def same_thing(thing: object) -> object:
    """
    post: __old__.thing == _
    """
    # Identity passthrough. If `thing` isn't copyable it won't be available
    # in `__old__`; contract enforcement then fails with an AttributeError.
    return thing
def _split_quoted(text): """ Split a str string on *SPACE* characters. Splitting is not done at *SPACE* characters occurring within matched *QUOTATION MARK*s. *REVERSE SOLIDUS* can be used to remove all interpretation from the following character. :param str text: The string to split. :return: A two-tuple of str giving the two split pieces. """ quoted = False escaped = False result = [] for i, ch in enumerate(text): if escaped: escaped = False result.append(ch) elif ch == '\\': escaped = True elif ch == '"': quoted = not quoted elif not quoted and ch == ' ': return "".join(result), text[i:].lstrip() else: result.append(ch) return "".join(result), ""
def get_includes(args):
    """Collect include paths ('/I<path>') from a compiler argument list."""
    return {arg[2:] for arg in args if arg.startswith('/I')}
def build_notch(x, y, notch_width, unitFactor, dir=1):
    """Draw a notch around the x value; dir=-1 puts the notch on the other side.

    Returns an SVG path fragment (two 'L x,y L x,y' segments).
    """
    half = notch_width / 2
    left = x - half
    right = x + half
    depth = y + notch_width * dir
    segment = 'L %s,%s L %s,%s'
    path = segment % (left * unitFactor, y * unitFactor,
                      left * unitFactor, depth * unitFactor)
    path += segment % (right * unitFactor, depth * unitFactor,
                       right * unitFactor, y * unitFactor)
    return path
def dim_pow(dims, exp):
    """Raise a 7-element dimensionality tuple to the power *exp*.

    :param dims: The base dimensions.
    :type dims: ``tuple``
    :param exp: The power to raise the base dimensions to.
    :type exp: ``numbers.Real``
    :rtype: ``tuple``
    """
    return tuple(dims[axis] * exp for axis in range(7))
def is_null_or_empty(string_val):
    """Return True when *string_val* is None, empty, or whitespace-only.

    :param str string_val: The value to check.
    :rtype: bool
    """
    return not (string_val and string_val.strip())
def DD(in_: list):
    """Outputs R-L: the difference of the last two elements of a list.

    Fixes the original hard-coded indices ``in_[3] - in_[2]``, which only
    meant "the last two elements" for 4-element lists.
    """
    return in_[-1] - in_[-2]
def _get_url(method, api_url, api_version): """ Build the API URL. """ return "{url}/{version}/{method}.json".format( url=api_url, version=float(api_version), method=method )
def find_indices(nums, val):
    """Return a pair of distinct indices (i, j), i < j, whose values sum to val.

    Fixes the original, which allowed the same element to be used twice
    (outer == inner), e.g. find_indices([3, 4], 6) wrongly gave (0, 0).

    Returns False when no such pair exists.
    """
    for outer in range(len(nums)):
        for inner in range(outer + 1, len(nums)):
            if nums[outer] + nums[inner] == val:
                return (outer, inner)
    return False
def decorate_pid_for_redirect(pid, redirect_code=303, redirect_n=3):
    """Return a PID decorated to trigger *redirect_n* redirects of *redirect_code*."""
    return "<REDIRECT:%s:%s>%s" % (redirect_code, redirect_n, pid)
def union(a, b):
    """Return the elements that are in A or B or in both, as a list."""
    combined = set(a)
    combined |= set(b)
    return list(combined)
def shout_all(word1, word2):
    """Return a tuple of both words, each with '!!!' appended."""
    return (word1 + '!!!', word2 + '!!!')
def ZigZagDecode(value):
    """Inverse of ZigZagEncode(): maps 0,1,2,3,... to 0,-1,1,-2,..."""
    decoded = value >> 1
    if value & 0x1:
        decoded ^= ~0
    return decoded
def mtf_encode(buf):
    """Move-to-front-style encoding of *buf*.

    A never-seen character is appended to the lookup list and emitted as
    (position, char); an already-seen character is emitted as its 1-based
    position and then moved to the front of the list.
    """
    lookup = []
    encoded = []
    for symbol in buf:
        if symbol in lookup:
            pos = lookup.index(symbol)
            encoded.append(pos + 1)  # positions are 1-based
            lookup.pop(pos)
            lookup.insert(0, symbol)  # move-to-front
        else:
            lookup.append(symbol)
            encoded.extend((len(lookup), symbol))
    return encoded
def should_omit(names, artifact):
    """Return True if *artifact* matches one of the patterns in *names*.

    A name matches when it is a substring of the maven coordinate or equal
    to the workspace name.

    Args:
      names: list of string patterns to test.
      artifact: artifact dict with 'coordinate' and 'ws_name' keys.
    Returns:
      boolean
    """
    for pattern in names:
        if pattern in artifact["coordinate"] or pattern == artifact["ws_name"]:
            return True
    return False
def first_non_whitespace_char(string):
    """Return the first non-whitespace character of *string*, or None.

    Uses str.isspace(), so tabs and newlines are skipped too — the
    original only skipped the literal space character ' ', contradicting
    its own name and docstring.

    :param string: the string to be checked
    :return: the first non-whitespace char, or None if there is none
    """
    for ch in string:
        if not ch.isspace():
            return ch
    return None
def indent(text, level):
    """Indent each line of *text* by prepending 4*level spaces."""
    prefix = "    " * level
    return "\n".join(prefix + line for line in text.split("\n"))
def get_collections(profiles):
    """Return a list of all the data_types in the system, lower-cased.

    The original assigned ``list(profiles.keys())`` and then immediately
    overwrote it with the lower-cased comprehension; the dead first
    assignment has been removed.
    """
    return [name.lower() for name in profiles.keys()]
def parseMembers(members_json):
    """Extract (name, local_score, stars) per member from AoC leaderboard
    JSON, sorted by score then stars, both descending."""
    members = []
    for member in members_json.values():
        members.append((member.get('name', 'unknown'),
                        member.get('local_score', 0),
                        member.get('stars', 0)))
    members.sort(key=lambda entry: (-entry[1], -entry[2]))
    return members
def parse_begins(msg):
    """Parse the guard ID out of a '... #ID begins his shift' message."""
    guard_token = msg.split()[1]
    return int(guard_token[1:])  # drop the leading '#'
def GetDict(data, key_path, default_value=None):
    """Retrieve a value nested inside dictionaries by key path.

    Similar to `dict.get`, except the key may be a path: either a list of
    keys or a dot-delimited string (['a', 'b'] or 'a.b').

    Args:
      data: A dictionary that may contain sub-dictionaries.
      key_path: A list of keys, or one string delimited by dots.
      default_value: Returned when the path does not exist.
    """
    keys = key_path.split('.') if isinstance(key_path, str) else key_path
    node = data
    for key in keys:
        if key not in node:
            return default_value
        node = node[key]
    return node
def filter_enabled_jobs(jobs):
    """Return the chronos jobs whose 'disabled' flag is exactly False."""
    enabled = []
    for job in jobs:
        if job['disabled'] is False:
            enabled.append(job)
    return enabled
def list_format(items, fmt):
    """Apply the format string *fmt* to each item; return the formatted list."""
    return [fmt.format(item) for item in items]
def _process_bene(list_, dict_, keyword): """ This function processes the BENE part of the initialization file. """ # Distribute information name, val_treated, val_untreated = list_[0], list_[1], list_[2] # Initialize dictionary if 'TREATED' not in dict_.keys(): for subgroup in ['TREATED', 'UNTREATED']: dict_[subgroup] = {} dict_[subgroup]['coeff'] = [] dict_[subgroup]['int'] = None dict_[subgroup]['sd'] = None # Type conversion val_treated = float(val_treated) val_untreated = float(val_untreated) # Collect information if name in ['coeff']: dict_['TREATED'][name] += [val_treated] dict_['UNTREATED'][name] += [val_untreated] else: dict_['TREATED'][name] = val_treated dict_['UNTREATED'][name] = val_untreated # Finishing return dict_
def int_to_bits(value, length):
    """Return the low *length* bits of *value* as a tuple, LSB first.

    >>> int_to_bits(1, 3)
    (1, 0, 0)
    >>> int_to_bits(7, 2)
    (1, 1)
    """
    return tuple((value >> bit) & 1 for bit in range(length))
def uppercase_first_letter(s):
    """Return string "s" with its first character upper-cased.

    Returns '' unchanged instead of raising IndexError on empty input
    (the original indexed s[0] unconditionally).
    """
    if not s:
        return s
    return s[0].upper() + s[1:]
def get_ref(record):
    """Return the name of a VPC, or its ID when it has no 'Name' tag.

    Args:
        record: A VPC record returned by AWS.
    Returns:
        The VPC's name, or its ID if it has no name.
    """
    vpc_id = record["VpcId"]
    name_tags = [tag for tag in record.get("Tags") or []
                 if tag["Key"] == "Name"]
    return name_tags[0]["Value"] if name_tags else vpc_id
def print_nums_opt(*numbers: int) -> str:
    """Zero-pad numbers to a common width and join them with spaces.

    Each number is left-padded with zeros to the width of the widest
    number. Fixes the original ``len(max(str(num)))``, which took the max
    over the characters of a single number and therefore always produced
    width 1 — i.e. the zfill was a no-op.

    Args:
        *numbers: <int> sequence of numbers
    Returns:
        zero filled string.

    Examples:
        >>> print_nums_opt(11, 12, 23)
        '11 12 23'
        >>> print_nums_opt(1, 100)
        '001 100'
    """
    width = max((len(str(num)) for num in numbers), default=0)
    return ' '.join(str(num).zfill(width) for num in numbers)
def apk(actual, predicted, k=100):
    """Compute the average precision at k between two lists of items.

    Parameters
    ----------
    actual : list
        Elements to be predicted (order does not matter).
    predicted : list
        Predicted elements (order matters).
    k : int, optional
        Maximum number of predicted elements considered.

    Returns
    -------
    score : double
        The average precision at k over the input lists.

    Notes
    -----
    Duplicates in `predicted` are not filtered (callers here never produce
    duplicated recommendations); an empty `actual` is assumed to be
    handled by the caller before this function is invoked.
    """
    ranked = predicted[:k]
    hits = 0.0
    precision_sum = 0.0
    for rank, item in enumerate(ranked, start=1):
        if item in actual:
            hits += 1.0
            precision_sum += hits / rank
    return precision_sum / min(len(actual), k)
def encrypt(message, mapping):
    """Substitution cipher.

    In a substitution cipher one letter is substituted for another to
    garble the message (e.g. A -> Q, B -> T, C -> G).

    ``mapping`` may be:
      * a 26-character string — each lowercase letter a-z is substituted
        by the character at the corresponding position; every other
        character passes through unchanged (the behaviour the original
        docstring promised but never implemented), or
      * an int — every character is shifted by that amount (the original
        "lazy" Caesar-style behaviour, kept for backward compatibility).

    Returns the encrypted version of the message as a string.
    """
    if isinstance(mapping, int):
        # Original lazy shift, preserved for existing callers.
        return ''.join(chr(ord(ch) + mapping) for ch in message)
    table = str.maketrans('abcdefghijklmnopqrstuvwxyz', mapping)
    return message.translate(table)
def len_occ(occ):
    """Compute the length of the occurrence pseudo list.

    Pairs are used instead of regular lists so the value is a hashable,
    non-mutable element; the list is nested (head, tail) 2-tuples.
    """
    length = 0
    node = occ
    while isinstance(node, tuple) and len(node) == 2:
        length += 1
        node = node[1]
    return length
def cf_to_dec(cf):  # pragma: no cover
    """Compute the decimal form of a continued fraction, iteratively.

    Parameters
    ----------
    cf : array-like
        Coefficients of the continued fraction.

    Returns
    -------
    x : float
        Floating point representation of cf.
    """
    value = cf[-1]
    # Fold from the innermost coefficient outwards.
    for coefficient in reversed(cf[:-1]):
        value = coefficient + 1 / value
    return value
def String(query_string):
    """Build a criterion that searches using Elasticsearch querystring syntax.

    Example: ``String('title:misp AND tlp:2')`` produces
    ``{"_string": "title:misp AND tlp:2"}``.

    !!! Warning
        Deprecated: available in TheHive 3 ONLY and not ported to TheHive 4.
    """
    return {'_string': query_string}
def addHtmlBreaks(s, isXhtml=True):
    """Replace all newlines with <br/> (XHTML) or <br> tags."""
    tags = {True: '<br/>\n', False: '<br>\n'}
    return s.replace('\n', tags[isXhtml])
def solve(passphrases):
    """Calculate the number of valid passphrases.

    A passphrase is valid when it contains no repeated words.

    :passphrases: string of passphrases, separated by newlines
    :return: number of valid passphrases

    >>> solve('aa bb cc dd ee')
    1
    >>> solve('aa bb cc dd aa')
    0
    >>> solve('aa bb cc dd aaa')
    1
    """
    valid = 0
    for line in passphrases.split('\n'):
        words = line.split()
        if len(set(words)) == len(words):
            valid += 1
    return valid
def stringfyList(oneList: list) -> str:
    """Stringify a list into a comma-separated string.

    Uses ``str.join``, which also handles the empty list (the original
    raised IndexError on ``oneList[-1]``) and non-string elements (the
    original raised TypeError when concatenating the last element).

    @type oneList: list
    @param oneList: A list to be stringed
    @returns str: The stringed list
    """
    return ','.join(str(item) for item in oneList)
def filter_queue_max(locations, max_len):
    """Return the locations whose queues are not too long (len <= max_len).

    Args:
        locations (iterable): objects inheriting from the
            covid19sim.base.Location class
    Returns:
        list
    """
    short_enough = []
    for loc in locations:
        if len(loc.queue) <= max_len:
            short_enough.append(loc)
    return short_enough
def format_bytes(n) -> str:
    """Format a byte count as human-readable text.

    >>> format_bytes(1)
    '1 B'
    >>> format_bytes(1234)
    '1.23 kB'
    >>> format_bytes(12345678)
    '12.35 MB'

    (adapted from dask.distributed, where it is not exported)
    """
    for threshold, unit in ((1e15, "PB"), (1e12, "TB"), (1e9, "GB"), (1e6, "MB")):
        if n > threshold:
            return "%0.2f %s" % (n / threshold, unit)
    if n > 1e3:
        return "%0.2f kB" % (n / 1000)
    return "%d B" % n
def findFeature(cov, chrom, start, end):
    """Return True if any position in [start, end) on *chrom* overlaps a feature."""
    if chrom not in cov:
        return False
    region = cov[chrom]
    return any(pos in region for pos in range(start, end))
def delete_brackets(s):
    """
    Delete all brackets in out page content
    :param s: our page content
    :return new page content without brackets
    """
    # Scans for '{{' ... '}}' pairs, tracking open brackets on a stack and
    # splicing matched regions out of the string in place.
    stack = []
    i = 0
    size = len(s)
    while i < size - 1:
        c = s[i]
        if i == size - 2:
            # Too close to the end for a 2-char token: give up and return.
            return s
        if c == '{' and s[i + 1] == '{':
            # Record the opening position and skip past the token.
            stack.append(('{', i))
            i += 2
            # NOTE(review): this branch is not an `elif`, so after `i += 2`
            # the following `if c == '}'` still runs with the OLD `c`; its
            # `else` then adds another `i += 1`, skipping one extra char
            # after every '{{' — looks unintended, confirm before changing.
        if c == '}' and s[i + 1] == '}':
            if len(stack) == 1:
                # Outermost pair closed: cut the whole bracketed region out
                # and resume scanning from where it started.
                start_index = stack.pop()[1]
                s = s[: start_index] + s[i + 2:]
                i = start_index
                size = len(s)
            else:
                if stack:
                    # Closing a nested pair: just unwind the stack.
                    stack.pop()
                else:
                    # Unmatched '}}': drop the token itself.
                    s = s[: i] + s[i + 2:]
                    size = len(s)
                i += 2
        else:
            i += 1
    return s
def brightness_to_percentage(byt):
    """Convert absolute brightness 0..255 to a 0..100 percentage."""
    return int(byt * 100.0 / 255.0)
def dim(x):
    """Return the nesting depth (dimensionality) of *x*.

    A helper function for update_picture: descends into x[0] until
    indexing fails. Treats str/bytes as scalars — the original recursed
    into them forever ('a'[0] == 'a'), hanging on any non-empty string.
    """
    depth = 0
    el = x
    while not isinstance(el, (str, bytes)):
        try:
            el = el[0]
        except Exception:
            break
        depth += 1
    return depth
def str_to_state(str_state):
    """Parse a permutation of the digits 0-8 into a 9-tuple state."""
    assert len(str_state) == 9 and sorted(str_state) == list('012345678')
    return tuple(map(int, str_state))
def compare_balance_with_zero(balance):
    """Sign of *balance* with a ±0.05 tolerance.

    :param balance: a double with the value of the balance after a year.
    :return: 0 when nearly zero, 1 when greater, -1 when lower.
    """
    if -0.05 <= balance <= 0.05:
        return 0
    return 1 if balance > 0.05 else -1
def recognize_greeting(statement):
    """Return True if *statement* starts with a greeting ("hi"/"hello").

    Case-insensitive. The original contained an unreachable
    ``print('Hi, how are you?')`` after ``return True``; that dead code
    has been removed.

    Args:
        statement (str): a string from the commandline from the user
    Returns:
        bool: True if statement is a greeting, False otherwise

    >>> recognize_greeting('hi')
    True
    """
    return statement.lower().startswith(('hi', 'hello'))
def set_accuracy_95(num):
    """Reduce floating point accuracy to 9.5 (xxxx.xxxxx).

    :param float num: input number
    :returns: float with specified accuracy
    """
    return float(format(num, "9.5f"))
def timestr(time):
    """Encode an int time as a parameter-safe string: sign letter ('p'/'m')
    followed by the zero-padded absolute value."""
    sign = 'p' if time >= 0 else 'm'
    return '%s%02i' % (sign, abs(time))
def classifier_wrapper(classifier, classifier_type, test_sample):
    """Wrap pattern classification over the supported model types.

    ARGUMENTS:
        - classifier: trained model (sklearn SVC / RandomForestClassifier /
          GradientBoostingClassifier / ExtraTreesClassifier, or the
          library's kNN type)
        - classifier_type: "knn", "svm", "svm_rbf", "randomforest",
          "gradientboosting" or "extratrees"
        - test_sample: a feature vector (np array)
    RETURNS:
        - (class ID, probability estimate); (-1, -1) for unknown types.
    """
    sklearn_types = ("svm", "randomforest", "gradientboosting",
                     "extratrees", "svm_rbf")
    if classifier_type == "knn":
        class_id, probability = classifier.classify(test_sample)
        return class_id, probability
    if classifier_type in sklearn_types:
        sample = test_sample.reshape(1, -1)
        return classifier.predict(sample)[0], classifier.predict_proba(sample)[0]
    return -1, -1
def find_subpath(path, subpath):
    """Find a subpath in the band structure path.

    Given the band structure as a list of (label, x-value) tuples and a
    list of symmetry point labels, return the consecutive section of
    `path` whose labels match `subpath`, e.g. for
    [("G",0.0),("H",0.18),("P",0.34),("G",0.50),("N",0.63)] and
    ["H","P","G"] return [("H",0.18),("P",0.34),("G",0.50)].

    Returns None when `subpath` is not a consecutive subsequence.
    """
    width = len(subpath)
    wanted = list(subpath)
    for start in range(len(path) - width + 1):
        window = path[start:start + width]
        if [label for label, _ in window] == wanted:
            return window
    return None
def _pad_sequence_fix(attr): """Changing onnx's pads sequence to match with mxnet's pad_width mxnet: (x1_begin, x1_end, ... , xn_begin, xn_end) onnx: (x1_begin, x2_begin, ... , xn_end, xn_end)""" new_attr = () if len(attr) % 2 == 0: for index in range(int(len(attr) / 2)): new_attr = new_attr + attr[index::int(len(attr) / 2)] return new_attr
def is_float(target):
    """Return True if *target* can be converted to float."""
    try:
        float(target)
    except Exception:
        return False
    return True
def _non_string_elements(x): """ Simple helper to check that all values of x are string. Returns all non string elements as (position, element). :param x: Iterable :return: [(int, !String), ...] """ problems = [] for i in range(0, len(x)): if not isinstance(x[i], str): problems.append((i, x[i])) return problems
def overlay(given, defaults):
    """Merge *given* onto *defaults*.

    Pull elements from given until we run out, then use defaults; a
    default is also used wherever the given element is empty (None or '').
    """
    merged = []
    for pos in range(min(len(given), len(defaults))):
        value = given[pos]
        merged.append(defaults[pos] if value in (None, '') else value)
    if len(given) < len(defaults):
        merged.extend(defaults[len(given):])
    return merged
def is_packed_array(type_name):
    """True for the Packed*Array types that get our extra packed array functions."""
    return type_name in {
        "PackedByteArray",
        "PackedColorArray",
        "PackedFloat32Array",
        "PackedFloat64Array",
        "PackedInt32Array",
        "PackedInt64Array",
        "PackedStringArray",
        "PackedVector2Array",
        "PackedVector3Array",
    }
def detokenize(sent):
    """Undo wordpiece tokenization: merge '##'-prefixed pieces into the
    preceding token (a leading '##' piece just has its prefix stripped)."""
    merged = []
    for position, piece in enumerate(sent):
        if piece.startswith("##"):
            if position > 0:
                merged[-1] += piece[2:]
            else:
                merged.append(piece[2:])
        else:
            merged.append(piece)
    return merged
def chr_split_list(split_headers):
    """Get chromosome info from split_headers.

    Args:
        split_headers (list): header list split into parts
    Returns:
        chr_list (list): each non-empty item[0] split on ':'
    """
    return [entry[0].split(":") for entry in split_headers if entry[0]]
def check_invalid(rules, _, nearby_tickets, part2=False):
    """Find the entries of the nearby tickets admitted by no rule.

    Ticket entries must fall in the union of the value sets in the
    ``rules`` dictionary. Returns the sum of all invalid entries, or the
    per-ticket lists of invalid entries when ``part2`` is True.

    The admissible-value union is now computed once, before the loop —
    the original rebuilt it for every ticket even though it does not
    depend on the ticket.
    """
    rule_sets = list(rules.values())
    admissible = rule_sets[0].union(*rule_sets[1:])
    invalid = [[entry for entry in ticket if entry not in admissible]
               for ticket in nearby_tickets]
    if part2:
        return invalid
    return sum(entry for ticket_invalid in invalid for entry in ticket_invalid)
def pluralise(word: str, number: int) -> str:
    """Naive English count phrase: '1 banana', '2 bananas'."""
    suffix = "" if number == 1 else "s"
    return "%d %s%s" % (number, word, suffix)
def transcodeWord(word, morseDict):
    """Map each letter of *word* (case-insensitively) to its Morse-code string.

    Letters missing from *morseDict* become None.
    """
    return [morseDict.get(letter.lower()) for letter in word]
def translate_severity(severity):
    """Translate a Jask insight severity to Demisto severity (capped at 4)."""
    return min(severity, 4)
def insertion_sort(lst):
    """Stable, in-place insertion sort in increasing order.

    :param lst: An unsorted list of items comparable with '>'.
    :type lst: list
    :return: The same list, sorted in increasing order.
    :rtype: list
    """
    for upper in range(1, len(lst)):
        # Sink lst[upper] leftwards until it is in place.
        for pos in range(upper, 0, -1):
            if lst[pos - 1] <= lst[pos]:
                break
            lst[pos - 1], lst[pos] = lst[pos], lst[pos - 1]
    return lst
def convert_from_node_parameter(args_in, from_par=None):
    """Convert 'from_node' references and resolve 'from_parameter' dependencies.

    Dict entries carrying 'from_node' become '_<node>'; entries carrying
    'from_parameter' are resolved against *from_par*. A single (non-list)
    argument is returned unwrapped.
    """
    was_single = not isinstance(args_in, list)
    items = [args_in] if was_single else args_in
    for idx, entry in enumerate(items):
        if isinstance(entry, dict) and 'from_node' in entry:
            items[idx] = '_' + entry['from_node']
        if from_par and isinstance(entry, dict) and 'from_parameter' in entry:
            if entry['from_parameter'] == 'x':
                # Fixes an error when using the apply process.
                items[idx] = '_' + from_par['data']
            else:
                items[idx] = '_' + from_par[entry['from_parameter']]
    return items[0] if len(items) == 1 else items
def filter_username(username) -> str:
    """Remove '/u/' and 'u/' (anywhere they occur) from a username.

    :return: The filtered username.
    """
    cleaned = username.replace("/u/", "")
    return cleaned.replace("u/", "")
def rm_duplicates(lis):
    """Remove duplicates from *lis* and return the result sorted."""
    return sorted(set(lis))
def color_scale(hex_str, factor):
    """Scale an RGB hex string by *factor*; returns the scaled hex string.

    Invalid input (negative factor, or not 6 hex digits) is returned
    as-is, minus any leading '#'.
    """
    digits = hex_str.strip('#')
    if factor < 0 or len(digits) != 6:
        return digits
    channels = [int(digits[k:k + 2], 16) for k in (0, 2, 4)]
    # Truncate then clamp each channel into 0..255.
    scaled = [min(255, max(0, int(value * factor))) for value in channels]
    return "#%02x%02x%02x" % tuple(scaled)
def letter_grades(highest):
    """Lower threshold scores for each D-A letter-grade interval.

    :param highest: integer of highest exam score.
    :return: list of four lower thresholds. For highest=100 (failing
        <= 40) the result is [41, 56, 71, 86]:
        41 <= "D" <= 55, 56 <= "C" <= 70, 71 <= "B" <= 85, 86 <= "A" <= 100.
    """
    step = round((highest - 41) / 4)
    return [41 + step * band for band in range(4)]
def truncate(seq: str, k: int) -> str:
    """Trim *seq* so its length divides evenly by *k*."""
    return seq[:len(seq) - len(seq) % k]
def N(u,i,p,knots):
    """
    u: point for which a spline should be evaluated
    i: spline knot
    p: spline order
    knots: all knots

    Evaluates the spline basis of order p defined by knots at knot i and point u.
    """
    # Cox-de Boor recursion. Base case: indicator on the half-open knot
    # interval (knots[i], knots[i+1]].
    if p == 0:
        if knots[int(i)] < u and u <=knots[int(i+1)]:
            return 1.0
        else:
            return 0.0
    else:
        # Left term: weight by position inside [knots[i], knots[i+p]].
        # Repeated knots make the denominator zero; the convention 0/0 -> 0
        # is implemented via the ZeroDivisionError handlers.
        try:
            k = ((float((u-knots[int(i)])) / float((knots[int(i+p)] - knots[int(i)]) )) * N(u,i,p-1,knots))
        except ZeroDivisionError:
            k = 0.0
        # Right term: weight by position inside [knots[i+1], knots[i+p+1]].
        try:
            q = ((float((knots[int(i+p+1)] - u)) / float((knots[int(i+p+1)] - knots[int(i+1)]))) * N(u,i+1,p-1,knots))
        except ZeroDivisionError:
            q = 0.0
        return float(k + q)
def _get_chunks(text, length=200, overlap=50):
    """Splits text into chunks

    Args:
        text (str): Text
        length (int, optional): Chunk length. Defaults to 200
        overlap (int, optional): Chunk overlap. Defaults to 50
    Return:
        list: Text chunks
    """
    l_total = []
    l_partial = []
    text_split = text.split()
    n_words = len(text_split)
    # Each chunk advances by (length - overlap) words; +1 covers the tail.
    splits = n_words // (length - overlap) + 1
    if n_words % (length - overlap) == 0:
        # Exact division: the +1 above over-counted by one chunk.
        splits = splits - 1
    if splits == 0:
        # Always emit at least one chunk, even for empty/short text.
        splits = 1
    for split in range(splits):
        if split == 0:
            l_partial = text_split[:length]
        else:
            l_partial = text_split[
                split * (length - overlap) : split * (length - overlap) + length
            ]
        l_final = " ".join(l_partial)
        if split == splits - 1:
            # Drop a short trailing chunk (< 75% of the target length),
            # unless it is the only chunk.
            if len(l_partial) < 0.75 * length and splits != 1:
                continue
        l_total.append(l_final)
    return l_total
def ip_num_active_elements(objects):
    """Return the number of active elements corresponding to ``objects``.

    Parameters
    ==========
    objects : list of tuples of edge indices (edges starting at a vertex).

    Notes
    =====
    In the computation of the independence polynomial, each vertex
    ordering yields a number of active elements: the maximum number
    ``nu`` of ``eta`` elements appearing during the computation. Its
    complexity depends on ``2**nu``.
    """
    active = set()
    peak = 0
    for obj in objects:
        for edge in obj:
            # Toggle membership: an index seen a second time is closed.
            if edge in active:
                active.discard(edge)
            else:
                active.add(edge)
            if peak < len(active):
                peak = len(active)
    return peak
def get_build_list(json_data: dict) -> list:
    """Return a list of build IDs for a device."""
    return [firmware["buildid"] for firmware in json_data["firmwares"]]
def to_loki_date(date):
    """Return *date* (M/D/Y) reformatted as YYYY-MM-DD.

    The empty string maps to '0000-00-00'; single-digit parts are
    zero-padded; two-digit years are assumed to be 20xx.
    """
    if date == '':
        return "0000-00-00"
    parts = [p if len(p) != 1 else '0' + p for p in date.split('/')]
    year = parts[2]
    if len(year) == 2:
        year = '20' + year
    return '-'.join([year, parts[0], parts[1]])
def get_unique_peptides(peptides: list) -> list:
    """Return the unique peptides from *peptides* (order not preserved).

    Args:
        peptides (list of str): peptide list.
    Returns:
        list (of str): list of unique peptides.
    """
    unique = set(peptides)
    return list(unique)
def check_connect_4(board: list, x: int, y: int) -> int:
    """Return the length of a contiguous line of 4+ matching tiles through
    (x, y), or 0 when no such line exists.

    Checks the vertical, horizontal and both diagonal directions. Fixes
    the original implementation, which double-counted every diagonal
    cell, never applied the win test to the anti-diagonal, and counted
    non-contiguous matches along a row/column.

    @param board: the board data (list of rows).
    @param x: column index.
    @param y: row index.
    """
    rows = len(board)
    cols = len(board[0])
    tile = board[y][x]

    def _run(dx, dy):
        # Contiguous matching tiles through (x, y) along +/-(dx, dy).
        count = 1
        for sign in (1, -1):
            i, j = x + sign * dx, y + sign * dy
            while 0 <= i < cols and 0 <= j < rows and board[j][i] == tile:
                count += 1
                i += sign * dx
                j += sign * dy
        return count

    for dx, dy in ((0, 1), (1, 0), (1, 1), (1, -1)):
        count = _run(dx, dy)
        if count >= 4:
            return count
    return 0
def _make_bbox(bbox): """Formats bounding box for use in MountainHub API. Keyword arguments: box -- Dictionary used to construct bounding box """ if bbox is None: return {} return { 'north_east_lat': bbox['latmax'], 'north_east_lng': bbox['lonmax'], 'south_west_lat': bbox['latmin'], 'south_west_lng': bbox['lonmin'] }
def construct_references(ref):
    """Return a title reference object.

    For every key of *ref* except 'records', builds a list starting with
    'Other' followed by that sub-dict's keys sorted by value, descending.
    """
    reference = {}
    for key in ref:
        if key == 'records':
            continue
        ordered = sorted(ref[key], key=ref[key].get, reverse=True)
        reference[key] = ['Other'] + ordered
    return reference
def encode_region(region):
    """Encode a region as a string.

    A region of strings joins with '-'; a region of segments joins each
    segment with '-' and the segments with '_'. Raises ValueError on an
    empty region.
    """
    if len(region) == 0:
        raise ValueError('Cannot encode an empty region.')
    if isinstance(region[0], str):
        return '-'.join(region)
    return '_'.join('-'.join(segment) for segment in region)
def parse_str(feature):
    """Map a street GeoJSON feature onto a flat dict.

    Required output keys: 'id', 'category'; 'dist_meters' is the distance
    from the point in the search. 'geometry' is copied when present.
    """
    props = feature['properties']
    result = {
        'id': feature['id'],
        'category': 'adr_street',
        'str_name': props['name'],
        'str_type': props['type'],
        'stl_name': props['settlement'],
        'stl_type': props['settlement_type'],
        'stl_id': props['settlement_id'],
        'dist_meters': props['dist_meters'],
    }
    if 'geometry' in feature:
        result['geometry'] = feature['geometry']
    return result
def twoSum(nums, target):
    """Return [i, j], i < j, such that nums[i] + nums[j] == target.

    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    for i, first in enumerate(nums):
        for j in range(i + 1, len(nums)):
            if first + nums[j] == target:
                return [i, j]
def _fullname(o): """Object's full name.""" return o.__class__.__module__ + "." + o.__class__.__name__
def split_ip_range(ip_range):
    """Expand an IP range into the list of all addresses in it.

    *ip_range* is an address range split by a hyphen, inclusive at both
    ends (e.g. "10.0.0.1-10.0.0.9").
    """
    from ipaddress import ip_address
    parts = ip_range.split("-")
    first = int(ip_address(parts[0]))
    last = int(ip_address(parts[1]))
    return [ip_address(value).exploded for value in range(first, last + 1)]
def _automatic_mount_only(fs): """ Filter that returns true if the filesystem is automount enabled """ return fs['automaticMountOption'] in ('yes', 'automount')
def best_match(s1, s2):
    """Return the best-matching window of the long DNA sequence.

    :param s1: str, the long DNA sequence.
    :param s2: str, the short DNA sequence.
    :return: the len(s2)-wide slice of s1 with the most positional
        matches to s2 (ties favour the latest window); '' when no window
        has any match.
    """
    best_score = 0
    best_ans = ''
    for start in range(len(s1) - len(s2) + 1):
        score = sum(1 for offset in range(len(s2))
                    if s1[start + offset] == s2[offset])
        # Only windows with at least one match may become the answer.
        if score > 0 and score >= best_score:
            best_score = score
            best_ans = s1[start:start + len(s2)]
    return best_ans
def severity_string_to_num(severity_str: str) -> int:
    """Return the integer representation (1-5) of a severity name, -1 if unknown.

    :param severity_str: Severity score as String
    """
    levels = {'Info': 1, 'Low': 2, 'Medium': 3, 'High': 4, 'Critical': 5}
    return levels.get(severity_str, -1)
def filter_unwanted_characters(expression):
    """Return (and print) the characters of the decoded expression that
    are neither digits nor allowed arithmetic symbols.

    :param expression: the decoded expression
    :return: string of unwanted characters
    """
    allowed_symbols = ["+", "-", "/", "*", "(", ")"]
    unwanted = "".join(ch for ch in expression
                       if not str(ch).isdigit() and ch not in allowed_symbols)
    print(unwanted)
    return unwanted
def weight_average(my_list=None):
    """Return the weighted average of (value, weight) tuples in a list.

    Fixes the mutable default argument (``my_list=[]`` is shared across
    calls); ``None`` is now the sentinel, preserving the original's
    behaviour of returning 0 for missing/invalid/empty input.
    """
    if not isinstance(my_list, list) or len(my_list) == 0:
        return (0)
    total = 0
    weight_sum = 0
    for tup in my_list:
        total += tup[0] * tup[1]
        weight_sum += tup[1]
    return total / weight_sum
def migrate_types(data):
    """Fix data types.

    Recursively walks *data* (a JSON-LD-like nested dict) and rewrites
    every '@type' value found in the mapping table below. Mutates and
    returns *data*.
    """
    # Old '@type' value -> replacement type list. Multi-valued types are
    # keyed by the str() of their sorted list so lookups are order-insensitive.
    type_mapping = {
        'dcterms:creator': ['prov:Person', 'schema:Person'],
        'schema:Person': ['prov:Person', 'schema:Person'],
        str(sorted(['foaf:Project', 'prov:Location'])): [
            'prov:Location', 'schema:Project'
        ],
        'schema:DigitalDocument': [
            'prov:Entity', 'schema:DigitalDocument', 'wfprov:Artifact'
        ]
    }

    def replace_types(data):
        # In-place, depth-first replacement of '@type' values.
        for key, value in data.items():
            if key == '@type':
                if not isinstance(value, str):
                    # Normalize list-valued types to the sorted-str key form.
                    value = str(sorted(value))
                new_type = type_mapping.get(value)
                if new_type:
                    data[key] = new_type
            elif isinstance(value, dict):
                replace_types(value)
            elif isinstance(value, (list, tuple, set)):
                for v in value:
                    if isinstance(v, dict):
                        replace_types(v)

    replace_types(data)
    return data
def replace(template, pattern, subst):
    """Substitute the first occurrence of *pattern* in *template* with *subst*.

    Returns the template unchanged when the pattern does not occur.
    """
    head, sep, tail = template.partition(pattern)
    if not sep:
        # Pattern absent: nothing to substitute.
        return template
    return '%s%s%s' % (head, subst, tail)
def build_complement(dna):
    """Build the complement of the input DNA string.

    A<->T and C<->G are swapped; any other character is dropped,
    matching the original if/elif chain which had no fallback branch.

    :param dna: str, the DNA sequence.
    :return: str, the complement of the input sequence.
    """
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs[base] for base in dna if base in pairs)
def parameter_code_sizer(opcode, raw_parameter_code_list):
    """Pad the parameter code list with zeros to the length required by the opcode.

    Mutates ``raw_parameter_code_list`` in place (never truncates it)
    and returns the same list.  Raises KeyError for an unknown opcode.
    """
    parameter_lengths = {1: 3, 2: 3, 3: 1, 4: 1, 5: 2, 6: 2, 7: 3, 8: 3, 9: 1, 99: 0}
    shortfall = parameter_lengths[opcode] - len(raw_parameter_code_list)
    if shortfall > 0:
        raw_parameter_code_list.extend([0] * shortfall)
    return raw_parameter_code_list
def concat(string1, string2):
    """Concatenate two strings with a single space between them."""
    return string1 + ' ' + string2
def add_prefix_un(word):
    """
    Make negative, or "not" words by adding un to them.

    :param word: str of a root word
    :return: str of root word with un prefix
    """
    return ''.join(('un', word))
def _check_assign(value, unknown, default): """Check for the unknown flag and return the correct value.""" if value != unknown: return value else: return default
def approx_Q_top(A, B, T, sigma=1, C=None):
    """
    Approximate (a)symmetric-top partition function.

    Adapted from Gordy and Cook, p. 57, equation 3.68.  When *C* is
    omitted the prolate symmetric-top case is assumed (C = B); pass
    A = C explicitly for the oblate case.

    Parameters
    ----------
    A - float
        Rotational constant for the A principal axis, in MHz.
    B - float
        Rotational constant for the B principal axis, in MHz.
    T - float
        Temperature in Kelvin
    sigma - int
        Rotational level degeneracy; i.e. spin statistics
    C - float, optional
        Rotational constant for the C principal axis, in MHz.  Defaults
        to None, which reduces to the prolate top case.

    Returns
    -------
    Q - float
        Partition function for the molecule at temperature T
    """
    if C is None:
        C = B  # symmetric (prolate) top: B == C
    prefactor = 5.34e6 / sigma
    return prefactor * (T**3. / (A * B * C))**0.5
def isabs(path):
    """Return True if *path* is absolute, i.e. begins with a slash."""
    root = '/'
    return path.startswith(root)
def decorate(name, type):
    """
    Decorate a parameter.

    >>> decorate("FOO", "arg")
    'FOO'
    >>> decorate("FOO", "flag")
    '[FOO]'
    >>> decorate("FOO", "param")
    '[FOO <foo>]'
    >>> decorate("FOO", "group")
    '[FOO <foo> ...]'
    >>> decorate("foo", "argn")
    '[<foo> ...]'
    """
    placeholder = name.lower()
    rendered = {
        "arg": name,
        "flag": "[%s]" % name,
        "param": "[%s <%s>]" % (name, placeholder),
        "group": "[%s <%s> ...]" % (name, placeholder),
        "argn": "[<%s> ...]" % name,
    }
    # "example" is handled by gen_example_rst; it and any unknown type
    # yield None, exactly as the original if/elif ladder did.
    return rendered.get(type)
def bytes_to_int(data):
    """Interpret a big-endian sequence of byte values as an unsigned integer.

    Accepts anything iterable whose items ``int()`` can convert (bytes,
    a list of ints, even a string of digit characters), like the
    original fold did.

    :param data: bytes of data
    :return: result int
    """
    total = 0
    for octet in data:
        total = total * 256 + int(octet)
    return total
def slide_buf(arr, data, size):
    """Append *data* to *arr* and keep only the last *size* elements, in place.

    The previous version appended in place but trimmed into a *new*
    list, so the caller's buffer grew without bound unless the return
    value was rebound.  Trimming with ``del`` keeps the passed-in list
    itself bounded.

    arr - byte array (mutated in place)
    data - byte array (appended as one element)
    size - maximum number of elements to retain
    :return: arr, now holding at most *size* elements
    """
    arr.append(data)
    if size > 0:
        # Drop everything before the last `size` elements; a no-op when
        # the buffer is not yet full.
        del arr[:-size]
    return arr
def sol_rad_from_sun_hours(daylight_hours, sunshine_hours, et_rad):
    """
    Calculate incoming solar (or shortwave) radiation, *Rs* (radiation
    hitting a horizontal plane after scattering by the atmosphere) from
    relative sunshine duration.

    If measured radiation data are not available this method is
    preferable to calculating solar radiation from temperature.  If a
    monthly mean is required then divide the monthly number of sunshine
    hours by number of days in the month and ensure that *et_rad* and
    *daylight_hours* was calculated using the day of the year that
    corresponds to the middle of the month.

    Based on equations 34 and 35 in Allen et al (1998).

    :param daylight_hours: Number of daylight hours [hours]. Can be
        calculated using ``daylight_hours()``.
    :param sunshine_hours: Sunshine duration [hours].
    :param et_rad: Extraterrestrial radiation [MJ m-2 day-1]. Can be
        estimated using ``et_rad()``.
    :return: Incoming solar (or shortwave) radiation [MJ m-2 day-1]
    :rtype: float
    """
    # Angstrom regression constants (a_s, b_s): FAO-recommended defaults
    # when calibrated values are unavailable.
    a_s = 0.25
    b_s = 0.5
    return (b_s * sunshine_hours / daylight_hours + a_s) * et_rad