def _iframe(src, width=650, height=365, content=None, link=None):
    """Create an iframe html snippet."""
    html = (
        '<iframe width="%s" height="%s" src="%s" '
        'frameborder="0" allowfullscreen></iframe>'
    ) % (width, height, src)
    if not content:
        return html
    if link:
        content = '<a href="%s">%s</a>' % (link, content)
    return '<figure>%s<figcaption>%s</figcaption></figure>' % (html, content)
def linecount(doc: str, end: int, start: int = 0):
    """Returns the number of lines (by counting the number of newline
    characters \\n, with the first line being line number one) in the
    string *doc* between the positions *start* and *end*.
    """
    return doc.count('\n', start, end) + 1
def __discount_FP(i, n):
    """
    Future worth factor (compound amount factor)

    Factor: (F/P, i, N)
    Formula: F = P * (1 + i)^N

    :param i: interest rate per period
    :param n: number of compounding periods
    :return: factor that converts a present value P into a future value F

    Cash Flow:
            F
            |
            |
    --------------
    |
    P
    """
    return (1 + i) ** n
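# A minimal usage sketch for the factor above; the 5% rate, 10 periods and
# $1,000 present value are purely illustrative.
i, n, present_value = 0.05, 10, 1000.0
factor = (1 + i) ** n                 # same computation as __discount_FP(i, n)
future_value = present_value * factor
print(round(future_value, 2))         # 1628.89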
def _TransformOperationState(metadata):
    """Extract operation state from metadata."""
    if 'status' in metadata:
        return metadata['status']['state']
    elif 'state' in metadata:
        return metadata['state']
    return ''
def _invoke_member(obj, membername, *args, **kwargs):
    """Retrieve a member of an object, then call it with the provided arguments.

    Args:
      obj: The object to operate on.
      membername: The name of the member to retrieve from obj.
      args: Positional arguments to pass to the method.
      kwargs: Keyword arguments to pass to the method.

    Returns:
      The return value of the method invocation.
    """
    return getattr(obj, membername)(*args, **kwargs)
def col_num_to_string(n):
    """
    Converts a column number (e.g. 3) into an excel-like column name (e.g. C)
    """
    string = ""
    while n > 0:
        n, remainder = divmod(n - 1, 26)
        string = chr(65 + remainder) + string
    return string
def checksumStr(data):
    """Take a NMEA 0183 string and compute the checksum.

    @param data: NMEA message. Leading ?/! and trailing checksum are optional
    @type data: str
    @return: hexadecimal value
    @rtype: str

    Checksum is calculated by xor'ing everything between ? or ! and the *

    >>> checksumStr("!AIVDM,1,1,,B,35MsUdPOh8JwI:0HUwquiIFH21>i,0*09")
    '09'
    >>> checksumStr("AIVDM,1,1,,B,35MsUdPOh8JwI:0HUwquiIFH21>i,0")
    '09'
    """
    # FIX: strip off new line at the end too
    if data[0] == '!' or data[0] == '?':
        data = data[1:]
    if data[-1] == '*':
        data = data[:-1]
    if data[-3] == '*':
        data = data[:-3]
    # checksum is used instead of sum to avoid shadowing the builtin function
    checksum = 0
    for c in data:
        checksum = checksum ^ ord(c)
    sum_hex = "%x" % checksum
    if len(sum_hex) == 1:
        sum_hex = '0' + sum_hex
    return sum_hex.upper()
def order_of_execution(count: int, k: int):
    """
    This one is linear time.

    :param count: number of items in the circle, numbered 1..count
    :param k: every k-th remaining item is removed
    :return: list of items in the order they were removed
    """
    actions = 0
    r = [*range(1, 1 + count, 1)]
    removed = []
    pos = 0
    while len(r) != 0:
        pos = (pos + (k - 1)) % len(r)
        removed.append(r.pop(pos))
        actions += 1
    print(f"actions: {actions}")
    return removed
def parse_input(event):
    """Parses all input required from step function."""
    input_request = event["input"]
    return {
        "batch_id": input_request["transformation_step_output"]["batch_id"],
        "output_sns_arn": input_request.get("destinationSnsArn"),
        "execution_id": event["execution_id"],
    }
def average(a, b):
    """
    Given two numbers a and b, return their average value.

    Parameters
    ----------
    a : number
        A number
    b : number
        Another number

    Returns
    -------
    res : number
        The average of a and b, computed using 0.5*(a + b)

    Example
    -------
    >>> average(5, 10)
    7.5
    """
    return (a + b) * 0.5
def insertion_sort(A):
    """Sort list of comparable elements into nondecreasing order."""
    for k in range(1, len(A)):           # from 1 to n-1
        cur = A[k]                       # current element to be inserted
        j = k                            # find correct index j for current
        while j > 0 and A[j - 1] > cur:  # element A[j-1] must be after current
            A[j] = A[j - 1]
            j -= 1
        A[j] = cur                       # cur is now in the right place
    return A
def find_dependency_in_spec(spec, ref):
    """Utility to return the dict corresponding to the given ref
    in a dependency build spec document fragment
    """
    for item in spec:
        if item['ref'] == ref:
            return item
def jaccard_similarity(x, y):
    """ set implementation of jaccard similarity """
    intersection_cardinality = len(set.intersection(*[set(x), set(y)]))
    union_cardinality = len(set.union(*[set(x), set(y)]))
    return intersection_cardinality / float(union_cardinality)
def preprocess(arr):
    """
    Args:
        data is a list of token positions where a position is a list of the
        form [d_model, idx1, idx2, idx3, ...], in which d_model is the length
        of the position list and "idx1", "idx2", "idx3" are the indexes whose
        corresponding value is 1.
    Returns:
        a 2D structure (seq_len, d_model=215) to represent positions
    """
    results = []
    for token_paths in arr:
        _token_paths = []
        for token_path in token_paths:
            token, path = token_path.split('@')
            path = path.split(u'\u2191')
            _token_paths.append(path)
        results.append(_token_paths)
    return results
def col_num_equal(header, second_line):
    """
    Check whether the two tab-separated strings have the same number of columns.

    :param header: the header of one of the simulation results files in the
        directory. This contains the names of the parameters and summary
        statistics.
    :param second_line: 2nd line of results file containing the parameter
        values and summary statistics
    :return: True or False
    """
    cols_header = len(header.split('\t'))
    cols_second_line = len(second_line.split('\t'))
    return cols_header == cols_second_line
def get_parent_path(thepath="."):
    """Returns the parent directory as an absolute path.

    Examples:
        >>> import os
        >>> os.getcwd()
        '/home/pengwin/Temp/pyplay/IMPORT_functions/my_py_funcs/worksheet_dir'
        >>>
        >>> get_parent_path()
        PosixPath('/home/pengwin/Temp/pyplay/IMPORT_functions/my_py_funcs')
        >>>
        >>> get_parent_path('bogus file name')
        PosixPath('/home/pengwin/Temp/pyplay/IMPORT_functions/my_py_funcs/worksheet_dir')
        >>>
        >>> get_parent_path('.')
        PosixPath('/home/pengwin/Temp/pyplay/IMPORT_functions/my_py_funcs')
        >>>
        >>> get_parent_path('_')
        PosixPath('/home/pengwin/Temp/pyplay/IMPORT_functions/my_py_funcs/worksheet_dir')
        >>>
        >>> get_parent_path('/does/this/path/exist')
        PosixPath('/does/this/path')

    Args:
        thepath (str, optional): Specify the path. Defaults to ".".

    Returns:
        pathlib.Path: Returns a pathlib Path() object
    """
    import pathlib

    # return pathlib.Path(thepath).parent.absolute()
    return pathlib.Path(thepath).absolute().parent
def count_noun(number, noun, plural=None, pad_number=False, pad_noun=False):
    """
    EXAMPLES::

        sage: from sage.doctest.util import count_noun
        sage: count_noun(1, "apple")
        '1 apple'
        sage: count_noun(1, "apple", pad_noun=True)
        '1 apple '
        sage: count_noun(1, "apple", pad_number=3)
        '  1 apple'
        sage: count_noun(2, "orange")
        '2 oranges'
        sage: count_noun(3, "peach", "peaches")
        '3 peaches'
        sage: count_noun(1, "peach", plural="peaches", pad_noun=True)
        '1 peach  '
    """
    if plural is None:
        plural = noun + "s"
    if pad_noun:
        # We assume that the plural is never shorter than the noun....
        pad_noun = " " * (len(plural) - len(noun))
    else:
        pad_noun = ""
    if pad_number:
        number_str = ("%%%sd" % pad_number) % number
    else:
        number_str = "%d" % number
    if number == 1:
        return "%s %s%s" % (number_str, noun, pad_noun)
    else:
        return "%s %s" % (number_str, plural)
def data_split(x_dataset, y_dataset, ratio=0.8):
    """
    Split input data into train data and test data by ratio.
    Return the split data.
    """
    x_dataset_size = len(x_dataset)
    y_dataset_size = len(y_dataset)
    x_dataset_train = x_dataset[:int(x_dataset_size * ratio)]
    y_dataset_train = y_dataset[:int(y_dataset_size * ratio)]
    x_dataset_test = x_dataset[int(x_dataset_size * ratio):]
    y_dataset_test = y_dataset[int(y_dataset_size * ratio):]
    return x_dataset_train, x_dataset_test, y_dataset_train, y_dataset_test
def get_samples_correlation_chains(mixin, trade_data, chain_length, sample_epoch):
    """
    To get list of correlation chains based on sample epoch and distance
    """
    chain = []
    chains = []
    for m in mixin:
        if m[1] == sample_epoch and len(chain) == 0:
            chain = [m]
            continue
        if len(chain) > 0 and m[0][0] < chain[0][0][0] + chain_length:
            chain.append(m)
        else:
            if len(chain) > 0:
                # price_diff = chain[-1][3] - chain[0][2]
                # trade_amount = sum(
                #     trade_data.trades[tick] for tick in range(
                #         chain[0][0], chain[-1][0] + 1))
                # print price_diff * trade_amount * 0.15, price_diff, trade_amount, chain, "\n"  # noqa
                chains.append(chain)
                chain = []
    return chains
def uint_to_int(uint, bits):
    """
    Assume an int was read from binary as an unsigned int, decode it as a
    two's complement signed integer.

    :param uint: unsigned integer value to decode
    :param bits: width of the integer in bits
    :return: signed integer value
    """
    if (uint & (1 << (bits - 1))) != 0:   # if sign bit is set e.g., 8bit: 128-255
        uint = uint - (1 << bits)         # compute negative value
    return uint
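# A short check of the two's-complement decoding above; the 8-bit values are
# illustrative.
assert uint_to_int(0x00, 8) == 0
assert uint_to_int(0x7F, 8) == 127    # sign bit clear -> value unchanged
assert uint_to_int(0x80, 8) == -128   # sign bit set   -> 128 - 256
assert uint_to_int(0xFF, 8) == -1     # 255 - 256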
def calculate_weight(i, j, patterns):
    """Calculate the weight between the given neurons"""
    num_patterns = len(patterns)
    s = 0.0
    for mu in range(num_patterns):
        s += patterns[mu][i] * patterns[mu][j]
    w = (1.0 / float(num_patterns)) * s
    return w
def get_keys(obj):
    """
    Returns an iterator or iterable containing keys of given obj.
    """
    if hasattr(obj, "keys"):
        return obj.keys()
    else:
        return range(len(obj))
def processor(document: dict) -> dict:
    """
    The default document processor for job documents.

    Transforms projected job documents to a structure that can be dispatched
    to clients.

    1. Removes the ``status`` and ``args`` fields from the job document.
    2. Adds a ``username`` field.
    3. Adds a ``created_at`` date taken from the first status entry in the job
       document.
    4. Adds ``state`` and ``progress`` fields derived from the most recent
       ``status`` entry in the job document.

    :param document: a document to process
    :return: a processed document
    """
    document["id"] = document.pop("_id")
    status = document.pop("status")
    last_update = status[-1]
    document.update({
        "state": last_update["state"],
        "stage": last_update["stage"],
        "created_at": status[0]["timestamp"],
        "progress": status[-1]["progress"]
    })
    return document
def map_chars_one_to_one(text_1: str, text_2: str) -> bool:
    """Check whether each character of text_1 maps consistently to a single
    character of text_2 (i.e. the mapping is direct).
    """
    char_dict = {}
    for c1, c2 in zip(text_1, text_2):
        if c1 not in char_dict:
            char_dict[c1] = c2
        elif char_dict[c1] != c2:
            return False
    return True
def check_double_quote(inpstring):
    """
    Check whether a string needs to be wrapped in double quotes (if it
    contains spaces, it has to be enclosed in double quotes).
    E.g.: --sfmt="TIFF (unstitched, 3D)"

    Input:
        inpstring: input string or array of strings
    Output:
        newstring = new string (or array of strings) corrected by quoting if
        necessary
    """
    if type(inpstring) == list:
        newstring = []
        for index in inpstring:
            tmp1 = index.find(" ")
            if tmp1 != -1:
                tmp2 = index.find('"')
                if tmp2 == -1:
                    dummy = index.find("=")
                    if dummy != -1:
                        newstring.append(
                            index[0 : dummy + 1] + '"' + index[dummy + 1 :] + '"'
                        )
                    else:
                        newstring.append('"' + index + '"')
                else:
                    newstring.append(index)
            else:
                newstring.append(index)
    else:
        tmp1 = inpstring.find(" ")
        if tmp1 != -1:
            tmp2 = inpstring.find('"')
            if tmp2 == -1:
                dummy = inpstring.find("=")
                if dummy != -1:
                    newstring = (
                        inpstring[0 : dummy + 1] + '"' + inpstring[dummy + 1 :] + '"'
                    )
                else:
                    newstring = '"' + inpstring + '"'
            else:
                newstring = inpstring
        else:
            newstring = inpstring
    return newstring
def is_integer(num_str: str) -> bool:
    """
    Check whether a string represents an integer.

    :param num_str: string to test
    :return: True if num_str is non-empty and contains only digits and
        underscores, otherwise False
    """
    if len(num_str) == 0:
        return False
    for ch in num_str:
        if not ch.isdigit() and ch != "_":
            return False
    return True
def __find_microhomology(seq_before, seq, seq_after):
    """
    Finds the length of microhomology before or after a mutation.

    :param seq_before: the genomic sequence before the mutation (str)
    :param seq: the genomic sequence of the mutation (str)
    :param seq_after: the sequence after the mutation (str)
    :returns: number of repeats (int)
    """
    i = 1
    number_of_bases_found = 0
    while seq[:i] == seq_after[:i]:
        number_of_bases_found += 1
        i += 1
    if number_of_bases_found == 0:
        i = 1
        while seq[::-1][:i] == seq_before[::-1][:i]:
            number_of_bases_found += 1
            i += 1
    return number_of_bases_found
def extract_media_url(media):
    """extracts the url of a media item: an image post, or an image within a gallery

    I extract the data from the first item in "display_resources" instead of
    the standard "display_url" because this is the lowest resolution available
    """
    return media["display_resources"][0]["src"]
def urljoin(*paths):
    """Join delimited path using specified delimiter.

    >>> assert urljoin('') == ''
    >>> assert urljoin('/') == '/'
    >>> assert urljoin('', '/a') == '/a'
    >>> assert urljoin('a', '/') == 'a/'
    >>> assert urljoin('', '/a', '', '', 'b') == '/a/b'
    >>> ret = '/a/b/c/d/e/'
    >>> assert urljoin('/a/', 'b/', '/c', 'd', 'e/') == ret
    >>> assert urljoin('a', 'b', 'c') == 'a/b/c'
    >>> ret = 'a/b/c/d/e/f'
    >>> assert urljoin('a/b', '/c/d/', '/e/f') == ret
    >>> ret = '/a/b/c/1/'
    >>> assert urljoin('/', 'a', 'b', 'c', '1', '/') == ret
    >>> assert urljoin([]) == ''
    """
    paths = [path for path in paths if path]
    if len(paths) == 1:
        # Special case where there's no need to join anything.
        # Doing this because if paths==['/'], then an extra '/'
        # would be added if the else clause ran instead.
        path = paths[0]
    else:
        leading = '/' if paths and paths[0].startswith('/') else ''
        trailing = '/' if paths and paths[-1].endswith('/') else ''
        middle = '/'.join([path.strip('/') for path in paths if path.strip('/')])
        path = ''.join([leading, middle, trailing])
    return path
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
    """Truncates a pair of sequences to a maximum sequence length."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_num_tokens:
            break
        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc_tokens) >= 1
        # We want to sometimes truncate from the front and sometimes from the
        # back to add more randomness and avoid biases.
        if rng.random() < 0.5:
            del trunc_tokens[0]
        else:
            trunc_tokens.pop()
    return tokens_b
def first_come_first_served(evs, iface):
    """ Sort EVs by arrival time in increasing order.

    Args:
        evs (List[EV]): List of EVs to be sorted.
        iface (Interface): Interface object. (not used in this case)

    Returns:
        List[EV]: List of EVs sorted by arrival time in increasing order.
    """
    return sorted(evs, key=lambda x: x.arrival)
def in2pt(inval=1):
    """1in -> 72pt"""
    return float(inval) * 72.0
def Factor_obs2int(a_lam):
    """
    Calculate the factor to correct the observed flux or luminosity to the
    intrinsic one.

    Parameters
    ----------
    a_lam : float
        The extinction in magnitude, A_lambda.

    Returns
    -------
    f : float
        The correction factor to convert the observed values to the intrinsic
        ones.

    Notes
    -----
    None.
    """
    f = 10**(0.4 * a_lam)
    return f
def find_dict(mdicts, key, val):
    """
    Given a list of multi-dicts, return the multi-dict that contains
    the given key/value pair.
    """
    def found(d):
        return key in d and val in d[key]

    try:
        return [d for d in mdicts if found(d)][0]
    except IndexError:
        raise LookupError("Dictionary not located for key = %s, value = %s"
                          % (key, val))
def _handle_sort_key(model_name, sort_key=None):
    """Generate sort keys according to the passed in sort key from user.

    :param model_name: Database model name to be queried (alarm, meter, etc.)
    :param sort_key: sort key passed from user.
    :return: sort keys list
    """
    sort_keys_extra = {'alarm': ['name', 'user_id', 'project_id'],
                       'meter': ['user_id', 'project_id'],
                       'resource': ['user_id', 'project_id', 'timestamp'],
                       }
    sort_keys = sort_keys_extra[model_name]
    if not sort_key:
        return sort_keys
    # NOTE(Fengqian): We need to put the sort key from user
    # in the first place of the sort keys list.
    try:
        sort_keys.remove(sort_key)
    except ValueError:
        pass
    finally:
        sort_keys.insert(0, sort_key)
    return sort_keys
def capm(rf, beta, rm):
    """
    Calculate the minimum rate of return using CAPM (Capital Asset Pricing
    Model).

    Parameters:
    -----------
    rf: The risk free rate of return.
    beta: The company's beta factor, which measures the sensitivity of an
        investment return to market movement.
    rm: Market rate of return.

    Notes:
    ------
    The CAPM is an alternative method of calculating the cost of equity
    capital for an investment. It is made up of two sides: the risk free rate,
    i.e. the basic rate which all projects must earn if completely free from
    risk, and the risk premium, which is obtained by applying the project beta
    to the difference between the market return and the risk free rate of
    return.

    Example:
    --------
    XYZ limited wants to determine its minimum required rate of return if the
    risk free rate is 7%, the market rate of return is 10% and the company has
    a beta factor of 1.2.

    Solution:
    ---------
    rf = 0.07, beta = 1.2, rm = 0.10

    Inserting the values into the formula:
    capm(0.07, 1.2, 0.10) = 0.106, i.e. 10.6%
    """
    if rf >= 1 or rm >= 1:
        return "values cannot be greater than or equal to 1"
    else:
        ke = rf + beta * (rm - rf)
        return round(ke, 4)
def is_unicode_safe(stream):
    """returns true if the stream supports UTF-8"""
    if not hasattr(stream, "encoding"):
        return False
    return stream.encoding in ["UTF_8", "UTF-8"]
def _get_dict_from_list(dict_key, list_of_dicts):
    """Retrieve a specific dict from a list of dicts.

    Parameters
    ----------
    dict_key : str
        The (single) key of the dict to be retrieved from the list.
    list_of_dicts : list
        The list of dicts to search for the specific dict.

    Returns
    -------
    dict value
        The value associated with the dict_key (e.g., a list of nodes or
        edges).
    """
    the_dict = [cur_dict for cur_dict in list_of_dicts if cur_dict.get(dict_key)]
    if not the_dict:
        raise ValueError('Could not find a dict with key %s' % dict_key)
    return the_dict[0][dict_key]
def closest_multiple(n, k, ceil=True):
    """Return the closest multiple of `k` to `n`, rounding up when `ceil`
    is True and down otherwise."""
    if n == 0:
        return 0
    if n < k:
        return k
    if n % k == 0:
        return n
    return k * (n // k + (1 if ceil else 0))
def _center_strip_right(text: str, width: int) -> str:
    """Returns a string with sufficient leading whitespace such that `text`
    would be centered within the specified `width` plus a trailing newline."""
    space = (width - len(text)) // 2
    return space * " " + text + "\n"
def first(func, iterable):
    """Returns the first element in iterable for which func(elem) is true.

    Equivalent to next(ifilter(func, iterable)).
    """
    for elem in iterable:
        if func(elem):
            return elem
def cmc_get_data(jso, cmc_id, pair_symbol='USD'):
    """ Pull relevant data from a response object """
    if not jso:
        return None
    data = jso.get('data', {})
    specific_data = data.get(str(cmc_id), {})
    quote = specific_data.get('quote', {})
    symbol_data = quote.get(pair_symbol, {})
    return {
        'price': symbol_data.get('price'),
        'volume': symbol_data.get('volume_24h'),
        'percent_change': symbol_data.get('percent_change_24h'),
        'market_cap': symbol_data.get('market_cap'),
    }
def skip_doc(cls):
    """Returns True if we should skip cls in docstring extraction."""
    return cls.__name__.endswith("Box") or (hasattr(cls, "no_doc") and cls.no_doc)
def fragment_to_keys(fragment):
    """Split a fragment, e.g. #/components/headers/Foo, into a list of keys
    ("components", "headers", "Foo")
    """
    return fragment.strip("#").strip("/").split("/")
def _sanitize_features_name(explanations, features_name):
    """
    Helper that provides generic feature names (with the feature index) if
    features_name is None.
    """
    if features_name is None:
        single_explanation = len(explanations.shape) == 1
        if single_explanation:
            features_name = [f"Feature {str(j)}" for j in range(len(explanations))]
        else:
            features_name = [f"Feature {str(j)}" for j in range(explanations.shape[1])]
    return features_name
def calc_total_crab_fuel_complex(input_positions: list) -> int:
    """
    Determine the fuel required for each crab to move to each position and
    return the fuel for the minimum consumption position.

    Args:
        input_positions: list of input crab submarine positions

    Returns:
        total fuel required for all crabs to move to the lowest total fuel
        position using advanced calculation
    """
    possible_positions = [0] * max(input_positions)
    for pos in range(len(possible_positions)):
        for crab in input_positions:
            dist = abs(crab - pos)
            possible_positions[pos] += int(dist * (dist + 1) / 2)
    return min(possible_positions)
def generate_ncr_sets(n, r):
    """
    This is exactly the same as the DP formula to compute nCr:
    C(n, r) = C(n-1, r-1)  [choose the nth element]
            + C(n-1, r)    [don't choose it]
    """
    if r <= 0 or n <= 0 or r > n:
        return [[]]
    if r == n:
        # CAREFUL: Don't forget this base case!
        return [[i for i in range(1, n + 1)]]
    choose_current = generate_ncr_sets(n - 1, r - 1)
    for lst in choose_current:
        lst.append(n)
    dont_choose_current = generate_ncr_sets(n - 1, r)
    return choose_current + dont_choose_current
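# A quick usage sketch of the recursion above: enumerating the 2-element
# subsets of {1, 2, 3, 4}; C(4, 2) = 6.
subsets = generate_ncr_sets(4, 2)
assert len(subsets) == 6
assert sorted(map(sorted, subsets)) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]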
def find_discrete_state(x0, part):
    """Return index identifying the discrete state
    to which the continuous state x0 belongs to.

    Notes
    =====
    1. If there are overlapping partitions
       (i.e., x0 belongs to more than one discrete state),
       then return the first discrete state ID

    @param x0: initial continuous state
    @type x0: numpy 1darray

    @param part: state space partition
    @type part: L{PropPreservingPartition}

    @return: if C{x0} belongs to some discrete state in C{part},
        then return the index of that state.
        Otherwise return None, i.e., in case C{x0} does not belong
        to any discrete state.
    @rtype: int
    """
    for (i, region) in enumerate(part):
        if x0 in region:
            return i
    return None
def findLargestAlphebetizedSubstr(inStr):
    """Finds the largest substring of inStr that is in alphabetical order."""
    if len(inStr) == 0:
        return ""
    curSubstr = inStr[0]
    curLargestSubstr = ""
    for i in inStr[1:]:
        if i.lower() >= curSubstr[-1].lower():
            # Still alphabetical...
            curSubstr += i
        else:
            # No longer alphabetical...
            if len(curSubstr) > len(curLargestSubstr):
                # Test to see if this substring is longer than the last one.
                curLargestSubstr = curSubstr
            curSubstr = i
    # End looping through string
    if len(curSubstr) > len(curLargestSubstr):
        # Test to see if this final substring is longer than the biggest so far.
        return curSubstr
    return curLargestSubstr
def distance2(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    return ((x1-x2)**2 + (y1-y2)**2)**0.5
def raw_name_to_display(raw_name):
    """
    Converts the given raw command or its parameter's name to its display name.

    Parameters
    ----------
    raw_name : `str`
        The name to convert.

    Returns
    -------
    display_name : `str`
        The converted name.
    """
    return '-'.join([
        w for w in
        raw_name.strip('_ ').lower().replace(' ', '-').replace('_', '-').split('-')
        if w
    ])
def find_pivot(input_list):
    """Find the pivot point of the sorted yet "shifted" list.

    A simple divide and conquer strategy to find the pivot point of the list.
    Time complexity O(log n)

    :param input_list: a sorted and pivoted list (i.e. "shifted")
    :type input_list: list
    :return: an index of the list being the pivot point
    :rtype: int
    """
    start = 0
    end = len(input_list) - 1
    while start <= end:
        mid = (start + end) // 2
        if input_list[start] <= input_list[end]:
            return start
        # the interval start-mid is sorted, then a pivot point is somewhere between mid-end
        elif input_list[start] <= input_list[mid]:
            start = mid + 1
        # the interval mid-end is sorted, then a pivot point is somewhere between start-mid
        else:
            end = mid
    return start
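# A brief usage sketch of the pivot search above, on a small rotated list.
rotated = [4, 5, 6, 1, 2, 3]           # [1..6] rotated left by 3
assert find_pivot(rotated) == 3        # index of the smallest element
assert find_pivot([1, 2, 3, 4]) == 0   # already sorted: pivot at index 0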
def append_path(filepath, path_list, path):
    """Append appropriate paths for testcase/suite/project in test folder"""
    temp_list = []
    for file_name in path_list:
        file_name = path + file_name
        temp_list.append(file_name)
    if temp_list:
        filepath.extend(temp_list)
    return filepath
def divide_lists(lst_numer, lst_denom):
    """
    Divides each element of the nested list 'lst_numer' by 'lst_denom'.

    The division is done by taking each element of 'lst_numer' and for each
    index of that element, dividing the item at that index by the item at the
    same index of 'lst_denom'. See example below:

    >>> numer = [[1., 2.],
    ...          [3., 4.]]
    >>> denom = [0.5, 0.5]
    >>> divide_lists(numer, denom)
    [[2.0, 4.0], [6.0, 8.0]]

    NOTE: It is assumed that each element of 'lst_numer' has the same length
    as 'lst_denom' as shown in the dimensions of the arguments below.

    Arguments:
        lst_numer: nested list of lists (dimensions N x M)
        lst_denom: list of denominators (dimensions 1 x M)

    Returns:
        A new list formed by dividing each element of 'lst_numer' by
        'lst_denom' according to the division process described above.
    """
    indexes = range(len(lst_denom))
    return [[n[i] / float(lst_denom[i]) for i in indexes] for n in lst_numer]
def union(a, b):
    """
    Returns the union of sets a and b.

    In plain English: returns all the items of a and b combined,
    with duplicates removed.
    """
    return a.union(b)
def intersection(l1, l2):
    """Return intersection of two lists as a new list::

        >>> intersection([1, 2, 3], [2, 3, 4])
        [2, 3]

        >>> intersection([1, 2, 3], [1, 2, 3, 4])
        [1, 2, 3]

        >>> intersection([1, 2, 3], [3, 4])
        [3]

        >>> intersection([1, 2, 3], [4, 5, 6])
        []
    """
    # return set(l1) & set(l2)  # Close, but no cigar
    # set(l1).intersection(l2)  # Unsure why this didn't work, but returned nothing?
    return [x for x in l1 if x in l2]
def get_radius_attribute(p, attribute_name):
    """
    Utility function to get an attribute from a tuple
    """
    try:
        return next(value for name, value in p if name == attribute_name)
    except StopIteration:
        return None
def load_fdist_from_str(line):
    """
    Load a frequency dictionary from a line of identifier, frequency pairs,
    e.g. S 10 NP 5 VP 5 ...
    """
    fdist = {}
    tokens = line.strip().split()
    nPair = int(len(tokens) / 2)
    assert len(tokens) % 2 == 0
    for i in range(nPair):
        identifier = tokens[i * 2]
        freq = float(tokens[i * 2 + 1])
        fdist[identifier] = freq
    return fdist
def verify_on_off(flag):
    """
    :type: str
    :param flag: On/Off parameter to check
    :rtype: str
    :return: Fixed flag format
    :raises: ValueError: invalid form
    """
    if flag is None:
        return None
    flag = str(flag).lower()
    if flag not in ['on', 'off']:
        raise ValueError('"{}" is not a valid parameter. '
                         'Allowed values are: "ON" or "OFF"'
                         .format(flag))
    return flag
def normalisation_constant(unnormalised: float, normalised: float) -> int:
    """
    Returns the constant used to normalise a value.

    :param unnormalised: The unnormalised value.
    :param normalised: The normalised value.
    :return: The normalisation constant.
    """
    return round(unnormalised / normalised)
def bioconductor_tarball_url(package, pkg_version, bioc_version):
    """
    Constructs a url for a package tarball

    Parameters
    ----------
    package : str
        Case-sensitive Bioconductor package name
    pkg_version : str
        Bioconductor package version
    bioc_version : str
        Bioconductor release version
    """
    return (
        'https://bioconductor.org/packages/{bioc_version}'
        '/bioc/src/contrib/{package}_{pkg_version}.tar.gz'.format(**locals())
    )
def convert_subscripts(old_sub, symbol_map):
    """Convert user custom subscripts list to subscript string according to `symbol_map`.

    Examples
    --------
    >>> oe.parser.convert_subscripts(['abc', 'def'], {'abc':'a', 'def':'b'})
    'ab'
    >>> oe.parser.convert_subscripts([Ellipsis, object], {object:'a'})
    '...a'
    """
    new_sub = ""
    for s in old_sub:
        if s is Ellipsis:
            new_sub += "..."
        else:
            # no need to try/except here because symbol_map has already been checked
            new_sub += symbol_map[s]
    return new_sub
def base36_to_int(s: str):
    """
    Convert a base 36 string to an int. Raise ValueError if the input won't
    fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    return int(s, 36)
def _get_entity_type_sg_name_field(entity_type):
    """
    Return the Shotgun name field to use for the specified entity type.
    This is needed as not all entity types are consistent!

    :param entity_type: The entity type to get the name field for
    :returns: The name field for the specified entity type
    """
    return {"HumanUser": "name", "Task": "content", "Project": "name"}.get(entity_type, "code")
def generate_list(iterable_a, iterable_b):
    """Return a list of a+b combinations"""
    result = []
    for iter_a in iterable_a:
        for iter_b in iterable_b:
            result.append(iter_a + iter_b)
    return result
def get_user_pool_domain(prefix, region):
    """Return a user pool domain name based on the prefix received and region.

    Args:
        prefix (str): The domain prefix for the domain
        region (str): The region in which the pool resides
    """
    return '%s.auth.%s.amazoncognito.com' % (prefix, region)
def focal_length_calculator(width_in_image, distance_in_image, real_width_of_object):
    """
    Calculate the focal length for distance estimation using triangle
    similarity.

    :param1 (width_in_image): The width of the object in the reference image
    :param2 (distance_in_image): The distance of the object from the camera
        [measured] for the reference image
    :param3 (real_width_of_object): The real width of the object [measured]
        in the reference image
    :returns (focal_length): (width_in_image * distance_in_image) /
        real_width_of_object
    """
    return (width_in_image * distance_in_image) / real_width_of_object
def limit_rgb(in_v, limiter):
    """
    Takes a value and its maximum potential and maps it to 0-255.

    :param in_v: input value
    :param limiter: value's maximum potential
    :return: int
    """
    out_v = int((in_v / limiter) * 255)
    return out_v
def _package_root(name):
    """Convert treadmill.logging.xxx => treadmill"""
    return name.split('.', 1)[0]
def _cleanUpAllIssues(results, all_issues):
    """
    Helper function for _getAllIssues().
    Clean up issues and strip out cursor info.

    GIVEN:
        results (dict) -- initial query response with metadata
        all_issues (list) -- nested list of issues with comments and metadata

    RETURN:
        results (dict) -- cleaned up results
    """
    results["data"]["repository"]["issues"]["edges"] = all_issues
    results["data"]["repository"]["issues"].pop("pageInfo")
    return results
def encrypt_file(filepath):
    """Encrypt file contents to base64.

    :param filepath: the path of the file.
    """
    try:
        # if it starts with ~
        # os.path.expanduser
        with open(filepath) as inf:
            file_contents = inf.read()
        return file_contents.encode('base64')
    except IOError:
        return filepath
def human_readable_patch(address, data, comp=None):
    """Returns a human readable string describing the patching action."""
    s = "[0x%04X] returns 0x%02X" % (address, data)
    if comp is not None:
        s += " if read as 0x%02X" % comp
    return s
def is_valid(row, col):
    """
    Returns a boolean based on whether or not a (row, col) pair is a valid
    board coordinate
    """
    return ((row >= 0) and (row <= 9) and (col >= 0) and (col <= 9))
def get_multi_insert_str(columns_to_insert, insert_values_dict_lst):
    """
    get_multi_insert_str creates the placeholder string for multiple insertion
    values.

    :param columns_to_insert: list of column names to insert
    :param insert_values_dict_lst: list of dicts, one per row of values
    :return: placeholder string such as "(%s,%s),(%s,%s)"
    """
    placeholder_str = ["%s"] * len(columns_to_insert)
    row_str = ",".join(placeholder_str)
    row_str = "(" + row_str + ")"
    row = [row_str]
    multi_row_str_lst = row * len(insert_values_dict_lst)
    multi_row_str = ",".join(multi_row_str_lst)
    return multi_row_str
def get_jyutping_from_mor(mor):
    """
    Extract jyutping string from *mor*.
    """
    jyutping, _, _ = mor.partition("=")
    jyutping, _, _ = jyutping.partition("-")
    jyutping, _, _ = jyutping.partition("&")
    return jyutping
def number_valid(value):
    """Test for valid number

    >>> number_valid(0)
    True
    >>> number_valid(-1)
    True
    >>> number_valid(1.12039123)
    True
    >>> number_valid('1.12039123')
    True
    >>> number_valid('x1.12039123')
    False
    >>> number_valid('t')
    False
    >>> number_valid('')
    True
    >>> number_valid(False)  # not sure if this is what we want
    True
    """
    if value == '':
        return True
    try:
        float(value)
        return True
    except ValueError:
        return False
def is_data(data):
    """
    Check if a packet is a data packet.
    """
    return len(data) > 26 and ord(data[25]) == 0x08 and ord(data[26]) in [0x42, 0x62]
def where_point_in_poly(poly_points):
    """
    Build a WHERE clause testing whether geom is contained in the polygon made
    of the input points.

    :input: an array of tuples [(lon, lat), (lon, lat), ...].
        THE LAST TUPLE SHOULD BE THE SAME AS THE FIRST
    :output: a where clause string
    """
    poly_points_string = map("{0[0]} {0[1]}".format, poly_points)
    linestring = "LINESTRING(" + ",".join(poly_points_string) + ")"
    where = "ST_Contains( ST_Polygon( ST_GeomFromText('" + linestring + "'), 4326), geom)"
    return where
def union_dict(dict1, dict2):
    """combine two dicts

    Args:
        dict1(dict): only dicts whose values are int are allowed
        dict2(dict): only dicts whose values are int are allowed

    Returns:
        dict2: combined dict

    Examples:
        >>> d = Dict()
        >>> d.union_dict({"a": 1}, {"b": 2})
        {'a': 1, 'b': 2}
    """
    for key in dict1.keys():
        if dict2.get(key) is not None:
            dict2[key] += dict1[key]
        else:
            dict2[key] = dict1[key]
    return dict2
def is_iterable(testee):
    """Check if a value is iterable.

    This does no type comparisons. Note that strings are iterables too!

    >>> is_iterable([])
    True
    >>> is_iterable("Hello World!")
    True
    >>> is_iterable(False)
    False
    """
    try:
        iter(testee)
        return True
    except TypeError:
        return False
import functools


@functools.lru_cache(maxsize=None)
def cached_pow(number: float, power: float) -> float:
    """
    Raises number to power. Caches the previous results so that if the same
    set of arguments is passed again, the result is fetched from the cache.
    """
    return pow(number, power)
def GetKRStateFraction(alignedSeqList):  # {{{
    """return (cnt_K, cnt_R, per_K, per_R)"""
    lengthAlignment = len(alignedSeqList[0])
    numSeq = len(alignedSeqList)
    cnt_K = [0] * lengthAlignment
    cnt_R = [0] * lengthAlignment
    for i in range(numSeq):
        alignedSeq = alignedSeqList[i]
        for j in range(lengthAlignment):
            s = alignedSeq[j]
            if s == 'K':
                cnt_K[j] += 1
            elif s == 'R':
                cnt_R[j] += 1
    per_K = [0.0] * lengthAlignment
    per_R = [0.0] * lengthAlignment
    numSeq_float = float(numSeq)
    for j in range(lengthAlignment):
        per_K[j] = cnt_K[j] / numSeq_float
        per_R[j] = cnt_R[j] / numSeq_float
    return (cnt_K, cnt_R, per_K, per_R)
def filter_toks(tokens, min_y, max_y_dist):
    """Currently just filters by max y distance between rows"""
    row_intervals = sorted([[t['top'], t['top'] + t['height']] for t in tokens if t['conf'] > .2])
    row_intervals.sort(key=lambda interval: interval[0])
    row_ranges = row_intervals
    for current in row_intervals:
        previous = row_ranges[-1]
        if current[0] - previous[1]:
            continue
        if current[0] <= previous[1]:
            previous[1] = max(previous[1], current[1])
        else:
            row_ranges.append(current)
    if len(row_ranges) <= 1:
        return tokens
    prev_top, prev_bottom = 0, min_y
    new_ranges = []
    for row_top, row_bottom in row_ranges:
        if abs(row_top - prev_bottom) > max_y_dist:
            break
        else:
            new_ranges.append([row_top, row_bottom])
            prev_top, prev_bottom = row_top, row_bottom
    new_tokens = []
    for token in tokens:
        for row_top, row_bottom in new_ranges:
            if token['top'] >= row_top and (token['top'] + token['height']) <= row_bottom:
                new_tokens.append(token)
                break
    return new_tokens
def append_ordinal(number):
    """Add an ordinal string to an integer.

    :param number: The number to be converted to an ordinal.
    :type number: int

    :rtype: str
    """
    suffixes = dict()
    for i, v in enumerate(['th', 'st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th']):
        suffixes[str(i)] = v
    v = str(number)
    if v.endswith("11") or v.endswith("12") or v.endswith("13"):
        return v + "th"
    # if v in ['11', '12', '13']:
    #     return v + 'th'
    return v + suffixes[v[-1]]
def linear_rampup(current, rampup_length):
    """Linear rampup"""
    assert current >= 0 and rampup_length >= 0
    if current >= rampup_length:
        return 1.0
    else:
        return current / rampup_length
def _lisp_pad_before(surrounding_text):
    """Helper for lisps that determines whether leading padding is needed."""
    return not (
        surrounding_text
        and surrounding_text.char_before in [*"([{@,\n\r\t ", "", None]
    )
def region_domains(board):
    """
    Make a set of coordinate tuples for each region to construct domains.

    Return a dictionary of region : set of coordinate pairs.
    """
    regionCoords = {}
    rowIndex = 0
    for row in board:
        colIndex = 0
        for colVal in row:
            if colVal in regionCoords:
                regionCoords[colVal].add((rowIndex, colIndex))
            else:
                regionCoords[colVal] = set()
                regionCoords[colVal].add((rowIndex, colIndex))
            colIndex += 1
        rowIndex += 1
    return regionCoords
def key(name):
    """Extracts the filename key."""
    dot = name.rfind(".")
    if -1 != dot:
        return name[0:dot]
    return ""
def ext_euclid(x, y):
    """
    Returns (g, a, b) such that g = gcd(x, y) = ax + by
    """
    if y == 0:
        # gcd = x and gcd = x = (1)x + (0)y
        return (x, 1, 0)
    else:
        # Recursively, g = a1 * (y) + b1 * (x % y)
        (g, a1, b1) = ext_euclid(y, x % y)
        # a1 * (y) + b1 * (x % y) = b1 * x + (a1 - (x//y) * b1) * y = g
        (g, a, b) = (g, b1, a1 - (x // y) * b1)
        return (g, a, b)
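# A small check of the identity computed above: g = a*x + b*y.
g, a, b = ext_euclid(240, 46)
assert g == 2                     # gcd(240, 46)
assert a * 240 + b * 46 == g      # Bezout identity holds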
def sum_series(n, a=0, b=1):
    """
    Determine which sequence to run.

    The function has one required parameter and two optional ones. The
    required parameter determines the element in the series to return. The
    optional parameters default to 0 and 1 and determine the first two values.
    Calling the function with no optional parameters will produce Fibonacci
    numbers. Calling the function with optional parameters can produce values
    from the Lucas series.
    """
    if n == 0:
        return a
    elif n == 1:
        return b
    else:
        return sum_series(n - 1, a, b) + sum_series(n - 2, a, b)
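# A quick sketch of the two call patterns described above: default arguments
# give Fibonacci, and seeding with 2 and 1 gives the Lucas numbers.
assert [sum_series(n) for n in range(7)] == [0, 1, 1, 2, 3, 5, 8]          # Fibonacci
assert [sum_series(n, 2, 1) for n in range(7)] == [2, 1, 3, 4, 7, 11, 18]  # Lucas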
def talk(text, is_yelling=False):
    """
    Prints text.

    If is_yelling is True, the text is capitalized first.
    Returns the transformed text.
    """
    if is_yelling:
        text = text.upper()
    print(text)
    return text
def generate_docstring(operation_spec):
    """Generate a docstring for an operation defined in operation_spec (swagger)"""
    # Description of the operation
    docs = operation_spec.get("description", "No Description")
    docs += "\n\n"
    # Parameters of the operation
    parameters = operation_spec.get("parameters", [])
    if len(parameters) != 0:
        docs += "\tArguments:\n\n"
        for parameter in parameters:
            docs += "{0} ({1}:{2}): {3}\n".format(
                parameter.get("name"),
                parameter.get("type", "No Type"),
                "Required" if parameter.get("required", False) else "Not Required",
                parameter.get("description"),
            )
    return docs
def get_job_type(name):
    """Returns job type based on its name."""
    if 'phase1' in name:
        return 'phase1'
    elif 'phase2' in name:
        return 'phase2'
    elif 'dfg' in name:
        return 'dfg'
    else:
        return 'other'
def flag_true(argument):
    """
    Check for a valid flag option (no argument) and return ``True``.
    (Directive option conversion function.)

    Raise ``ValueError`` if an argument is found.
    """
    if argument and argument.strip():
        raise ValueError('no argument is allowed; "%s" supplied' % argument)
    else:
        return True
def syntax_highlighter(input: str) -> str:
    """.repo-metadata.json language field to syntax highlighter name."""
    if input == "nodejs":
        return "javascript"
    return input
def yellow_bold(msg: str) -> str:
    """
    Given an 'str' object, wraps it between ANSI yellow & bold escape characters.

    :param msg: Message to be wrapped.
    :return: The same message, which will be displayed as yellow & bold by the
        terminal.
    """
    return '\u001b[1;33m%s\u001b[0m' % msg
def heuristic(i0, j0, i1, j1):
    """
    Squared Euclidean distance.

    At the moment it's much faster without sqrt. (4000x4000 grid ~8s vs ~60s)
    If the script is not accurate, use with math.sqrt.
    """
    return ((i0 - i1) ** 2) + ((j0 - j1) ** 2)
def resistivity(rho, T, factor, T0=293):
    """
    calculates temperature dependent resistivity
    """
    return rho * (1 + (T - T0) * factor)
def health():
    """
    A simple check to see if the application is running
    """
    return {"message": "Healthy"}
def check_namespace(name=""): """ replaces the colon with a hyphen :param name: :return: <str> name. """ if ':' in name: return '-'.join(name.split(':')) return name