content
stringlengths
42
6.51k
def check_gen_conditions(length, strength):
    """Check whether the requested password length/strength combination is valid.

    Args:
        length (int): Desired password length; must be >= 1 and >= strength.
        strength (int): Desired strength level 1-4; values above 4 are clamped.

    Returns:
        bool: True if a password can be generated, False otherwise.
    """
    if strength > 4:
        strength = 4  # Reduce too high strength level to maximum level
        print(
            "\nNOTE: Given password strength was too high and it was reduced to maximum level, level 4."
        )
    if strength < 1 or length < 1:
        if strength < 1:
            print("\nNOTE: Given strength should be in range 1 - 4.")
        if length < 1:
            print(
                "\nNOTE: Password length should be at least 1 (for level 1 strength)."
            )
        return False
    elif length < strength:
        # The password cannot be shorter than its strength level.
        print(
            "\nNOTE: You gave wrong password length according to its strength."
            "\n\t Length should be at least equal to strength."  # fixed typo: "should me" -> "should be"
        )
        return False
    else:
        return True
def format_hex(row_hash, col_hash, size=8):
    """Format dhash integers as a hex string of size*size//2 total hex digits
    (row_hash and col_hash concatenated).

    >>> format_hex(19409, 14959, size=4)
    '4bd13a6f'
    >>> format_hex(1, 2, size=4)
    '00010002'
    """
    digits_per_hash = size * size // 4
    row_part = format(row_hash, '0{}x'.format(digits_per_hash))
    col_part = format(col_hash, '0{}x'.format(digits_per_hash))
    return row_part + col_part
def unique(seq, idfunc=None):
    """Remove duplicates from a list or tuple while preserving order.

    Args:
        seq: The input list or tuple.
        idfunc: Optional key function called on each item to compute the
            identity used for the duplicate comparison; defaults to the
            item itself.

    Returns:
        A new sequence of the same type as ``seq`` with duplicates removed.
    """
    if idfunc is None:
        idfunc = lambda x: x
    preserved_type = type(seq)
    seen = set()  # a set is the idiomatic O(1) membership structure (was a dict of 1s)
    result = []
    for item in seq:
        marker = idfunc(item)
        if marker not in seen:
            seen.add(marker)
            result.append(item)
    return preserved_type(result)
def getGlobalRadixInfo(n):
    """Decompose a global FFT of length ``n`` into base radices for multiple
    kernel launches.

    For n larger than what can be computed with a local-memory FFT, global
    transposes over multiple kernel launches are needed; n is decomposed into
    large base radices (this implementation is fixed to use at most 128 as
    the base radix, e.g. 262144 = 128 x 128 x 16). Each base radix B is then
    further split into two sub-factors R1[i] * R2[i], with R1 grown in powers
    of two until it is at least R2, so each base FFT fits one kernel launch
    using in-register FFTs plus local-memory transposes (B <= 8 keeps
    R1 = B, R2 = 1).

    Returns:
        (radix, R1, R2): three lists of equal length describing, per kernel
        launch, the base radix and its two sub-factors.
    """
    base_radix = min(n, 128)
    # Peel off factors of base_radix until the remainder fits in one launch.
    remainder = n
    num_radices = 0
    while remainder > base_radix:
        remainder //= base_radix
        num_radices += 1
    radix = [base_radix] * num_radices + [remainder]
    num_radices += 1
    R1, R2 = [], []
    for B in radix:
        if B <= 8:
            # Small enough for a single in-register FFT.
            R1.append(B)
            R2.append(1)
        else:
            # Grow r1 by powers of two until r1 >= r2.
            r1, r2 = 2, B // 2
            while r2 > r1:
                r1 *= 2
                r2 = B // r1
            R1.append(r1)
            R2.append(r2)
    return radix, R1, R2
def fake_train(lrate, batch_size, arch):
    """Toy training objective; optimum at lrate=0.2, batch_size=4, arch='conv'."""
    arch_penalty = 0 if arch == "conv" else 10
    return (lrate - 0.2) ** 2 + (batch_size - 4) ** 2 + arch_penalty
def weekday(date):
    """Return the weekday name (e.g. 'Monday') for a date given either as a
    "%m-%d-%Y" string or as a datetime object."""
    if isinstance(date, str):
        from datetime import datetime
        # Change the format string here if needed, e.g. "%Y-%m-%d".
        date = datetime.strptime(date, "%m-%d-%Y")
    return date.strftime("%A")
def filtered_core_metadata(core):
    """Return a copy of ``core`` without internal bookkeeping keys, to avoid
    returning them via the REST API.

    Args:
        core (dict): Core metadata mapping (not mutated).

    Returns:
        dict: Copy with "_systemd_coredump", "_systemd_journal" and
        "_core_dir" removed when present.
    """
    core = core.copy()
    for key in ("_systemd_coredump", "_systemd_journal", "_core_dir"):
        # pop() with a default replaces the "check then del" two-step.
        core.pop(key, None)
    return core
def get_split_size(dim_size, chunks):
    """Compute the per-chunk split size in line with ``torch.chunk``.

    Args:
        dim_size (int): Size of the dimension being chunked.
        chunks (int): Number of chunks to create for ``dim_size``.

    Returns:
        int: Ceiling of dim_size / chunks.
    """
    # Ceiling division expressed via negated floor division.
    return -(-dim_size // chunks)
def varassign(v, X, E, rho, argument):
    """! Assigns input to specified gate set variables

    Parameters
    -------
    v : numpy array
        New set of variables
    X : numpy array
        Current gate estimate
    E : numpy array
        Current POVM estimate
    rho : numpy array
        Current initial state estimate
    argument : {"X", "K", "E", "rho"}
        Which part of the gate set is updated

    Returns
    -------
    [.,.,.] : 3 element list
        List in the order [X, E, rho] where either X, E or rho is replaced
        by v, depending on ``argument``.

    Raises
    ------
    ValueError
        If ``argument`` is not recognized (the original silently returned
        None in that case).
    """
    if argument in ("X", "K"):
        return [v, E, rho]
    elif argument == "E":
        return [X, v, rho]
    elif argument == "rho":
        return [X, E, v]
    raise ValueError("unknown argument: %r" % (argument,))
def contains(phrase, chars):
    """Return True if ``phrase`` contains ANY one of the characters in ``chars``."""
    return any(c in phrase for c in chars)
def escape_quotes(text):
    """Escape single and double quotes for embedding in JavaScript."""
    escaped = text.replace("'", "\\'")
    return escaped.replace('"', '\\"')
def numlist_to_hyphen_list(number_list):
    """Converts number list into a sorted duplicate free hyphenated string.

    Consecutive runs are collapsed to "first-last" entries; isolated numbers
    are kept as-is.  The returned value is a list mixing ints and
    "first-last" strings.

    NOTE(review): the final-element branch guards on ``first_num != 0``, so a
    run that starts at 0 and ends at the last element (e.g. [0, 1, 2]) does
    not get hyphenated correctly — confirm whether 0 can appear in callers'
    input.
    """
    number_list = list(set(number_list))  # de-duplicate, then sort
    number_list.sort()
    flag = 0  # 1 while we are inside a consecutive run
    new_list = []
    number_list_len = number_list.__len__()
    first_num = 0  # start of the current consecutive run
    last_num = 0   # end of the current consecutive run
    for num in number_list:
        num_index_current = number_list.index(num)
        # Index of the following element, or None at the end of the list.
        num_index_next = num_index_current + 1 \
            if num_index_current < number_list_len - 1 else None
        if num_index_next is not None:
            if num + 1 == number_list[num_index_next]:
                # Consecutive with the next value: open a run if needed.
                if flag == 0:
                    first_num = num
                    flag = 1
            elif flag == 1:
                # Run ends here: emit "first-last" instead of the number.
                last_num = num
                num = '%s-%s' % (str(first_num), str(last_num))
                first_num = 0
                last_num = 0
                flag = 0
        elif first_num != 0:
            # Last element closes an open run (see NOTE above re first_num == 0).
            last_num = num
            num = '%s-%s' % (str(first_num), str(last_num))
            flag = 0
        if flag == 0:
            new_list.append(num)
    return new_list
def get_orig(term):
    """Follow the ``orig`` attribute chain until reaching an object with no
    ``orig`` attribute, and return that object."""
    current = term
    while hasattr(current, 'orig'):
        current = current.orig
    return current
def toExportS16(op):
    """Convert a number to an exportable signed 16-bit integer by rounding
    and clamping to [-32768, 32767]."""
    value = int(round(op))
    if value > 32767:
        return 32767
    if value < -32768:
        return -32768
    return value
def ToJavaStringLiteral(codepoint_list):
    """Return a Java string literal for the given codepoint(s), with
    surrogate-pair (and therefore emoji) support.

    Accepts a single int codepoint or an iterable of codepoints; returns
    'null' for an empty/None input.
    """
    if isinstance(codepoint_list, int):
        codepoint_list = (codepoint_list,)
    if not codepoint_list:
        return 'null'
    pieces = ['"']
    for codepoint in codepoint_list:
        units = bytearray(chr(codepoint).encode('utf-16be'))
        # Two bytes per UTF-16 code unit; emit one \uXXXX escape per unit.
        if len(units) == 2:
            pieces.append(r'\u%02X%02X' % (units[0], units[1]))
        else:
            pieces.append(r'\u%02X%02X\u%02X%02X' % tuple(units))
    pieces.append('"')
    return ''.join(pieces)
def config_char_exceptions(exceptions):
    """Replace placeholder keys (e.g. "{space}") in a character-exceptions
    mapping with the actual characters they stand for, and drop the
    configparser-internal "__name__" entry if present.  Mutates and returns
    ``exceptions``."""
    PLACEHOLDERS = {
        "{space}": " ",
    }
    exceptions.pop("__name__", None)
    for placeholder, char in PLACEHOLDERS.items():
        if placeholder in exceptions:
            exceptions[char] = exceptions.pop(placeholder)
    return exceptions
def PBT(initial_investment, cflow):
    """Compute the payback time (in whole years) for the given cash flows.

    Args:
        initial_investment: Amount that must be recovered.
        cflow: Sequence of yearly cash flows.

    Returns:
        Number of years needed for the cumulative cash flow to reach
        ``initial_investment``, or the string 'Infeasible' when the cash
        flows run out first.

    Bug fixed: the original checked for exhaustion *after* consuming a cash
    flow, so a payback achieved exactly on the final year was reported as
    'Infeasible', and an empty ``cflow`` raised IndexError.
    """
    year = 0
    amount = 0  # cumulative cash recovered so far
    while amount < initial_investment:
        if year == len(cflow):
            # Ran out of cash flows before recovering the investment.
            return 'Infeasible'
        amount += cflow[year]
        year += 1
    return year
def bytes2human(n, format="%(value).1f%(symbol)s"):
    """Convert ``n`` bytes into a human readable string.

    >>> bytes2human(1023)
    '1023.0B'
    >>> bytes2human(1024)
    '1.0KiB'

    Adapted from
    https://github.com/giampaolo/psutil/blob/master/psutil/_common.py
    """
    symbols = ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')
    # Threshold per symbol: 1 << 10, 1 << 20, ... (i.e. 1024 ** k).
    thresholds = {s: 1 << ((i + 1) * 10) for i, s in enumerate(symbols[1:])}
    # Walk from the largest unit down and use the first one that fits.
    for symbol in reversed(symbols[1:]):
        if n >= thresholds[symbol]:
            value = float(n) / thresholds[symbol]
            return format % {"symbol": symbol, "value": value}
    return format % {"symbol": symbols[0], "value": n}
def get_magnitude_dc(value):
    """Return the 1-byte magnitude for a DC pixel value.

    The magnitude is the bit length of the absolute value, which reproduces
    the original negate-then-shift loop exactly:
    0 -> 0, +-1 -> 1, +-2..3 -> 2, +-4..7 -> 3, ...
    """
    # int.bit_length() replaces the manual shift-and-count loop.
    return abs(value).bit_length()
def getJDfromMJD(mjd):
    """Convert a Modified Julian Date to a Julian Date.

    Args:
        mjd: Modified Julian Date.

    Returns:
        The Julian Date, i.e. ``mjd + 2400000.5``.
    """
    MJD_OFFSET = 2400000.5  # JD corresponding to MJD = 0
    return mjd + MJD_OFFSET
def get_major_release(version_id: str) -> str:
    """Return the major release for a version string.

    The major release for both "1.17" and "1.17.1" is "1.17".

    Raises:
        ValueError: if ``version_id`` has fewer than two dot-separated parts.
    """
    parts = version_id.split(".")
    if len(parts) < 2:
        raise ValueError(f"version not in expected format: '{version_id}'")
    return ".".join(parts[:2])
def set_bit(int_type, offset):
    """Return ``int_type`` with the bit at ``offset`` set.

    >>> set_bit(2, 0)
    3
    """
    mask = 1 << offset
    return int_type | mask
def recursive_dict_subset(d1, d2):
    """Recursively check that every key/value pair in ``d2`` also appears,
    with an equal value, in ``d1``.

    Assumes ``d1`` and ``d2`` have the same structure, with ``d2`` possibly
    having fewer keys than ``d1``.  Non-dict leaves are compared with ``==``.
    """
    if not (isinstance(d1, dict) and isinstance(d2, dict)):
        return d1 == d2
    return all(
        k in d1 and recursive_dict_subset(d1[k], d2[k])
        for k in d2
    )
def weighted_err(error_1, error_2, weight):
    """Return the weighted combination of the two input error rates.

    Keyword arguments:
    - error_1 -- the first error rate (usually the zero-effort impostor FAR)
    - error_2 -- the second error rate (SFAR)
    - weight  -- weight applied to ``error_2``; ``error_1`` gets 1 - weight
    """
    w2 = weight
    w1 = 1 - weight
    return w1 * error_1 + w2 * error_2
def isoformat(date):
    """Convert a datetime object to an ISO 8601 string; ``None`` (and any
    falsy value) passes through as None.

    >>> import datetime
    >>> isoformat(datetime.datetime(2017, 8, 15, 18, 24, 31))
    '2017-08-15T18:24:31'

    Args:
        date (`datetime` or None): Input datetime object.

    Returns:
        `str` or None
    """
    if not date:
        return None
    return date.isoformat()
def is_challenge_phase_split_mapping_valid(
    phase_ids, leaderboard_ids, dataset_split_ids, phase_split
):
    """Validate that a challenge phase split references only known ids.

    Arguments:
        phase_ids {array} -- list of phase ids
        leaderboard_ids {array} -- list of leaderboard ids
        dataset_split_ids {array} -- list of dataset split ids
        phase_split {dict} -- challenge phase split config

    Returns:
        is_success {boolean} -- True only when all three referenced ids exist.
    """
    # One boolean expression replaces the original nested if/else ladder.
    return (
        phase_split["challenge_phase_id"] in phase_ids
        and phase_split["leaderboard_id"] in leaderboard_ids
        and phase_split["dataset_split_id"] in dataset_split_ids
    )
def prepare_model_settings(sample_rate, clip_duration_ms):
    """Calculate common settings needed for all models.

    Args:
        sample_rate: Number of audio samples per second.
        clip_duration_ms: Length of each audio clip to be analyzed, in ms.

    Returns:
        Dictionary containing common settings.
    """
    # ms -> samples, truncated to a whole number.
    return {'desired_samples': int(sample_rate * clip_duration_ms / 1000)}
def bezier_quadratic_terms(t):
    """Return the three Bernstein basis terms of a quadratic Bezier curve at
    parameter ``t``: [(1-t)^2, 2(1-t)t, t^2]."""
    u = 1 - t
    return [u * u, 2 * u * t, t * t]
def invert_pairs(trading_pairs: list) -> list:
    """Swap quote with base in each trading pair.

    :param list(trading_pairs): ["AAA-BBB", "BBB-CCC", "DDD-AAA"]
    :return list(trading_pairs): ["BBB-AAA", "CCC-BBB", "AAA-DDD"]
    """
    inverted = []
    for pair in trading_pairs:
        parts = pair.split('-')
        inverted.append(f"{parts[1]}-{parts[0]}")
    return inverted
def is_in_string(char, string):
    """Check for a character occurrence in the provided string.

    >>> is_in_string('3', str(15))
    False
    >>> is_in_string('5', str(15))
    True
    """
    # Iterate characters directly instead of indexing via range(len(...)).
    # (Plain ``char in string`` is avoided so a multi-character ``char``
    # keeps the original per-position equality semantics: always False.)
    return any(c == char for c in string)
def list_to_pretty_string(l, quote_char=False):
    """Join a list of strings into one human-readable string with commas and
    the word 'and', optionally wrapping each item in ``quote_char``.

    Returns None for an empty input; a single item is returned as-is (or
    quoted).
    """
    items = list(l)
    if len(items) == 0:
        return None
    if len(items) == 1:
        if quote_char:
            return "%s%s%s" % (quote_char, items[0], quote_char)
        return items[0]
    if quote_char:
        items = ["%s%s%s" % (quote_char, item, quote_char) for item in items]
    else:
        items = [str(item) for item in items]
    return " and ".join([", ".join(items[:-1]), items[-1]])
def relative_path_to_absolute(points):
    """Convert a relative path into absolute points.

    Input: list of relative points (the first point is absolute, each
    subsequent point is relative to the previous one).
    Output: list of absolute [x, y] points.
    """
    abs_path = []
    cur_x = cur_y = 0
    for dx, dy in points:
        cur_x += dx
        cur_y += dy
        abs_path.append([cur_x, cur_y])
    return abs_path
def bad_fibonacci(n):
    """Return the nth Fibonacci number (deliberately naive double recursion,
    exponential in ``n`` -- kept recursive for illustration)."""
    if n > 1:
        return bad_fibonacci(n - 1) + bad_fibonacci(n - 2)
    return n
def cipher(text, shift, encrypt=True):
    """
    Encrypts and decrypts codes using the Caesar cipher

    Parameters
    ----------
    text : str
        code to encrypt or decrypt; characters not in a-z/A-Z pass through
        unchanged
    shift : int
        the number of places shifted down the alphabet for this particular
        encrypted / decrypted code
    encrypt : bool
        boolean to indicate whether function should be used to encrypt or
        decrypt text

    Returns
    -------
    new_text : str
        the encrypted (if encrypt = True) or decrypted (if encrypt = False)
        message

    Examples
    --------
    >>> from cipher_jq2334 import cipher
    >>> cipher('Hello', 1, encrypt = True)
    'Ifmmp'
    >>> cipher('Ifmmp', 1, encrypt = False)
    'Hello'
    """
    # Note: the 52-character alphabet means a lowercase letter shifted past
    # 'z' wraps into the uppercase half (and vice versa).
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    new_text = ''
    for c in text:
        index = alphabet.find(c)
        if index == -1:
            # Non-alphabetic characters are copied through unshifted.
            new_text += c
        else:
            new_index = index + shift if encrypt == True else index - shift
            new_index %= len(alphabet)
            new_text += alphabet[new_index:new_index+1]
    return new_text
def find_first_occurrence(target, ls):
    """Binary-search ``ls`` (assumed sorted ascending) for the leftmost index
    holding ``target``; return -1 when absent.  (Question 12.1.)"""
    lo, hi = 0, len(ls) - 1
    first = -1
    while lo <= hi:
        mid = (lo + hi) // 2
        if ls[mid] < target:
            lo = mid + 1
        elif ls[mid] > target:
            hi = mid - 1
        else:
            # Record the hit, then keep searching left for an earlier one.
            first = mid
            hi = mid - 1
    return first
def str_to_bool(val):
    """Convert a string representation of truth to True or False.

    True values are 'y', 'yes', or '' (case-insensitive); false values are
    'n' or 'no' (case-insensitive).

    Raises ValueError if ``val`` is anything else.
    """
    try:
        normalized = val.lower()
    except AttributeError:
        # Non-string input: coerce to str first.
        normalized = str(val).lower()
    if normalized in ('yes', 'y', ''):
        return True
    if normalized in ('no', 'n'):
        return False
    raise ValueError("Invalid input value: %s" % normalized)
def id_func(item: dict) -> str:
    """Return a unique id for each test case (its "input" value), just to
    make test reporting clear."""
    return item["input"]
def call_if_callable(v):
    """Preprocess a value: call it if it is callable (late binding for
    lambdas), otherwise return it unchanged."""
    if callable(v):
        return v()
    return v
def empty_type_dict(input_object):
    """Generate an otherwise empty dict carrying the Java-style "@type" key,
    set to the object's class name.

    :param input_object: arbitrary instance of a Python class
    :return: dict with a single "@type" entry
    """
    return {"@type": input_object.__class__.__name__}
def to_hass_level(level):
    """Convert a Lutron light level (0.0-100.0) to a HASS level (0-255)."""
    scaled = (level * 255) / 100
    return int(scaled)
def find_parameter(params, key, index=None):
    """Resolve a plotting parameter with plot > figure > global priority.

    This is the heart of the plotting structure: a parameter defined globally
    applies to all figures; one defined inside a figure element applies only
    to that figure; one defined in a plot element applies only to that curve.
    Check the readme or the tutorials for details on the structure.

    Parameters:
        params (dict): Plot parameters (from the JSON conversion).
        key (str): Key used to access the parameter.
        index: None for a global search, an int for a figure search, or a
            (figure, plot) tuple for a curve search.

    Returns:
        The parameter if found, None otherwise.
    """
    # Build the lookup chain from most specific to most general; the first
    # accessor that succeeds wins.
    if index is None:
        accessors = [lambda: params[key]]
    elif type(index) == int:
        accessors = [
            lambda: params['figures'][index][key],
            lambda: params[key],
        ]
    elif type(index) == tuple:
        accessors = [
            lambda: params['figures'][index[0]]['plots'][index[1]][key],
            lambda: params['figures'][index[0]][key],
            lambda: params[key],
        ]
    else:
        # Unsupported index type: the original fell through and returned
        # None here as well.
        return None
    for accessor in accessors:
        try:
            return accessor()
        except (KeyError, IndexError):
            continue
    return None
def valid_category(category):
    """Check the given category and return a human-readable verdict string."""
    if not isinstance(category, str):
        return "Invalid format. Category must be a string"
    stripped = category.strip()
    if not stripped:
        return "Invalid category. Category can't be empty"
    if len(stripped) < 3:
        return "Invalid category. Category must be at least 3 characters long"
    return "Valid"
def switch_encoding(phasing):
    """Re-encode a phasing string so each output character says whether the
    two adjacent input characters differ ('1') or match ('0').

    >>> switch_encoding('0001011')
    '001110'
    """
    assert isinstance(phasing, str)
    adjacent = zip(phasing, phasing[1:])
    return "".join("0" if a == b else "1" for a, b in adjacent)
def rotation(rotor, i):
    """Rotate the sequence ``rotor`` right by ``i`` positions.

    Bug fixed: the original ``rotor[-i:] + rotor[:len(rotor)-i]`` returned
    the sequence *doubled* for i == 0 (``rotor[-0:]`` is the whole sequence,
    not an empty slice).  ``i`` is now also reduced modulo the length so any
    non-negative shift works, and empty input is handled.
    """
    n = len(rotor)
    if n == 0:
        return rotor
    i %= n
    return rotor[n - i:] + rotor[:n - i]
def calculate(a, b, c):
    """Return the discriminant b^2 - 4ac of the quadratic a*x^2 + b*x + c.

    :param a: the first number to be input (quadratic coefficient)
    :param b: the second number to be input (linear coefficient)
    :param c: the third number to be input (constant term)
    :return: the discriminant value
    """
    return b ** 2 - 4 * a * c
def is_list_with_max_len(value, length):
    """Return True when ``value`` is a list of the given length or shorter.

    :param value: The value being checked
    :type value: Any
    :param length: The maximum allowed length
    :type length: Nat
    :rtype: bool
    """
    if not isinstance(value, list):
        return False
    return len(value) <= length
def clean_size(string):
    """Trim a file-size string just past its final 'B' unit marker,
    e.g. '6.5 GBytes' -> '6.5 GB'.

    Args:
        string (str): File size string to clean up (falsy input is returned
            unchanged).

    Returns:
        str: Cleaned up file size.
    """
    if not string:
        return string
    marker = string.rfind('B')
    if marker > 0:
        # Keep everything up to and including the 'B'.
        return string[:marker + 1]
    return string
def get_subject_from_components(components):
    """Return the certificate subject string from a components list.

    >>> components = [('C', 'FR'), ('ST', 'Ile-de-France'), ('L', 'Paris'),
    ...               ('O', 'Test Ltd'), ('OU', 'Test'), ('CN', 'Alain Dupont'),
    ...               ('emailAddress', 'alain.dupont@localhost')]
    >>> print(get_subject_from_components(components))
    /C=FR/ST=Ile-de-France/L=Paris/O=Test Ltd/OU=Test/CN=Alain Dupont/emailAddress=alain.dupont@localhost

    (Fixed: the original doctest used Python 2 ``print`` statement syntax
    and could not run under Python 3.)
    """
    return u'/' + u'/'.join(['%s=%s' % (name, value) for name, value in components])
def cigar_has_insertion(cigar):
    """Return True when the cigarstring is non-None and contains an 'I'
    (insertion) operation; False otherwise."""
    # Single boolean expression replaces the flag-variable dance.
    return cigar is not None and 'I' in cigar
def overlap(bbox1, bbox2):
    """Return ``True`` if the two bounding boxes overlap at all, ``False``
    otherwise.

    Both ``bbox1`` and ``bbox2`` are sequences of ``(low, high)`` tuples, one
    per axis, defined in the same coordinate system; the boxes overlap only
    when every axis interval overlaps.
    """
    def axis_overlaps(interval1, interval2):
        lo1, hi1 = interval1
        lo2, hi2 = interval2
        return (hi1 >= lo2 and lo1 <= hi2) or (hi2 >= lo1 and lo2 <= hi1)

    return all(axis_overlaps(i1, i2) for i1, i2 in zip(bbox1, bbox2))
def get_weighted_mean(grouped_image_color):
    """Compute the count-weighted mean RGB of a colour group.

    Each item is ``(count, (r, g, b))``; per channel the weighted mean is
    sigma(channel * count) / sigma(count).

    Returns:
        ``(total_count, (r_mean, g_mean, b_mean))`` with integer channel
        means.
    """
    total_count = 0
    channel_sums = [0, 0, 0]
    for count, rgb in grouped_image_color:
        total_count += count
        for channel in range(3):
            channel_sums[channel] += rgb[channel] * count
    means = tuple(int(s / total_count) for s in channel_sums)
    return (total_count, means)
def get_entity_name(entity):
    """Return the "entityName" of the given entity element, falling back to
    "aName" when it is missing or empty."""
    return entity.get("entityName") or "aName"
def fib(n):
    """Return the nth Fibonacci number: every number after the first two is
    the sum of the two preceding ones (iterative formulation)."""
    prev, cur = 0, 1
    for _ in range(n):
        prev, cur = cur, prev + cur
    return prev
def vector_add(v, w):
    """Add corresponding elements of two vectors."""
    return [a + b for a, b in zip(v, w)]
def iterable_len(iterable):
    """Count the items of any iterable (iterators included!) by consuming it."""
    count = 0
    for _ in iterable:
        count += 1
    return count
def compute_average_word_length(tokens):
    """Return the average word length over a list of words.

    :param tokens: a list of words
    :return: the average length as a float; 0.0 for an empty list (the
        original raised ZeroDivisionError on empty input)
    """
    if not tokens:
        return 0.0
    return sum(len(word) for word in tokens) / len(tokens)
def get_copyright(info_list):
    """Return a newline-joined list of the unique copyrights from a list of
    PassageInfo objects.

    Fixed: the original joined a ``set``, whose iteration order is
    arbitrary, so the output order was nondeterministic.  ``dict.fromkeys``
    de-duplicates while preserving first-seen order.
    """
    unique_copyrights = dict.fromkeys(
        passage.copyright_display for passage in info_list
    )
    return "\n".join(unique_copyrights)
def modified_scientific_notation(num):
    """
    Convert numerical value to modified scientific notation.

    If the decimal representation of num is shorter than 22 digits, return
    num as it is.  (The original docstring said "greater than 22", which
    contradicted the code below.)

    >>> modified_scientific_notation(1)
    1

    Otherwise, return the concatenation of two parts of the scientific
    representation: the mantissa without its integral part, and the exponent.

    >>> modified_scientific_notation(123456789012345678901234567890)  # 1.2345678901234568e+29
    234567890123456829

    :type num: int
    :rtype: int
    """
    if len(str(num)) < 22:
        return num
    import re
    # re.split on '.' or 'e+' over e.g. '1.2345678901234568e+29' yields the
    # fraction digits at index 2 and the exponent digits at index 4.
    p = re.split('(\.|e\+)', '{:.16e}'.format(num))
    return int(p[2] + p[4])
def headers(dms=False):
    """Construct the header row for the CSV file.

    Args:
        dms: True for direct-message exports, False for tweet exports.
    """
    if dms:
        return ['ID', 'Created', 'Sender ID', 'Sender Name', 'Message']
    return ['ID', 'Created', 'Favorited', 'Truncated', 'Reply Status ID',
            'Reply User ID', 'Reply Screen Name', 'Source', 'Tweet']
def map_level(level):
    """Map a logging level name to its numeric code.

    Parameters:
        level (str): The level string to be mapped.

    Returns:
        int: Number that matches the logging level (unknown names map to
        10, i.e. debug).
    """
    LEVELS = {
        "critical": 50,
        "error": 40,
        "warning": 30,
        "info": 20,
        "debug": 10,
    }
    return LEVELS.get(level, 10)
def convert_dictionary(room_to_student):
    """Invert a room->students mapping into a student->room mapping, the
    valid return shape for the solver.

    e.g. {0: [1, 2, 3]} ==> {1: 0, 2: 0, 3: 0}
    """
    return {
        student: room
        for room, students in room_to_student.items()
        for student in students
    }
def folders_mapping(folders):
    """Return a mapping from folder to class index and the inverse mapping
    from class index to folder."""
    enumerated = list(enumerate(folders))
    folder2idx = {folder: idx for idx, folder in enumerated}
    idx2folder = {idx: folder for idx, folder in enumerated}
    return folder2idx, idx2folder
def solution2(A, k):  # O(N)
    """Left-rotate list ``A`` by ``k`` positions (k may exceed len(A)).

    eg. [1, 2, 3]: k = 1 -> [2, 3, 1]; k = 2 -> [3, 1, 2]

    >>> solution2([1, 2, 3, 4, 5], 4)
    [5, 1, 2, 3, 4]
    >>> solution2([1, 2, 3], 1)
    [2, 3, 1]
    >>> solution2([1, 2, 3], 2)
    [3, 1, 2]
    >>> solution2([1, 2, 3], 22)
    [2, 3, 1]
    """
    if not A:
        # Empty input: nothing to rotate (also avoids k % 0).
        return []
    shift = k % len(A)
    return A[shift:] + A[:shift]
def find_key(obj, key):
    """Recursively collect all values stored under ``key`` anywhere in a
    nested JSON-like structure of dicts and lists."""
    found = []

    def walk(node):
        # Depth-first traversal; only non-container dict values matching
        # ``key`` are collected, mirroring the original logic.
        if isinstance(node, dict):
            for k, v in node.items():
                if isinstance(v, (dict, list)):
                    walk(v)
                elif k == key:
                    found.append(v)
        elif isinstance(node, list):
            for item in node:
                walk(item)

    walk(obj)
    return found
def mul(a, b, dryrun=False):
    """Multiply two integers.

    a: Integer a
    b: Integer b
    out: result: Multiplication of a * b (the docstring previously said
         "a + b")

    When ``dryrun`` is True, return the operation name instead of computing.
    Fixed: the original returned {'result': 'add'} here, a copy-paste from
    the sibling add() function.
    """
    if dryrun:
        return {'result': 'mul'}
    return {'result': a * b}
def merge_counts(dict1, dict2):
    """Merge ``dict2`` of (word, freq2) into ``dict1`` of (word, freq1),
    summing frequencies.

    Parameters
    ----------
    dict1 : dict of (str, int)
        First dictionary (mutated in place).
    dict2 : dict of (str, int)
        Second dictionary.

    Returns
    -------
    result : dict
        ``dict1`` with summed frequencies as values.
    """
    for word, freq in dict2.items():
        dict1[word] = dict1.get(word, 0) + freq
    return dict1
def delete(seq, pos, deletions):
    """Return ``seq`` with ``deletions`` elements removed starting at ``pos``."""
    head = seq[:pos]
    tail = seq[pos + deletions:]
    return head + tail
def find_min_pos(input_list):
    """Return the index of the minimum value (first occurrence on ties), or
    -1 for an empty list."""
    if not input_list:
        return -1
    best_index = 0
    for idx in range(1, len(input_list)):
        # Strict comparison keeps the earliest index on ties.
        if input_list[idx] < input_list[best_index]:
            best_index = idx
    return best_index
def detect_bradycardia(heart_rate):
    """Best-guess check for bradycardia.

    :param float heart_rate: heart rate in bpm
    :return bool bradycardia: whether or not bradycardia was detected
    """
    import logging as log
    log.debug("Checking for bradycardia.\n")
    HR_LOW = 50  # Assuming a heart rate below 50 bpm is too slow
    return heart_rate < HR_LOW
def _to_version_info(version): """Convert a version string to a number and string tuple.""" parts = [] for part in version.split('.'): try: part = int(part) except ValueError: pass parts.append(part) return tuple(parts)
def harmonic_in_band(fvco, bands):
    """Get the description of the band containing a harmonic of fvco.

    :param fvco: frequency
    :type fvco: ``int`` or ``float``
    :param bands: Tuple of (lo_freq, hi_freq, description) tuples, one per
        band.
    :return: Description of the band containing a multiple of fvco,
        otherwise None.
    :rtype: ``str`` or None
    """
    for lo_freq, hi_freq, description in bands:
        # If the harmonic index changes across the band, some integer
        # multiple of fvco falls inside the band.
        if int(lo_freq / fvco) != int(hi_freq / fvco):
            return description
    return None
def merge_from_store_and_in_mems(from_store, in_mem_shas, dont_update_shas_of):
    """Merge the shas from the sha store into the in-memory shas.

    If we don't merge the shas from the sha store and we build a subgraph,
    the .shastore will only contain the shas of the files from the subgraph
    and the rest of the graph will have to be rebuilt.

    Args:
        from_store: Sha-store mapping with a 'files' dict (may be falsy).
        in_mem_shas: In-memory mapping with a 'files' dict; mutated in
            place and returned.
        dont_update_shas_of: File names whose shas must be dropped from the
            result.

    Returns:
        ``in_mem_shas`` with missing store entries merged in and excluded
        files removed.
    """
    def _drop_excluded(shas):
        # Shared cleanup step (was duplicated in both branches of the
        # original): remove every excluded file if present.
        for item in dont_update_shas_of:
            shas['files'].pop(item, None)
        return shas

    if not from_store:
        return _drop_excluded(in_mem_shas)
    for key, sha in from_store['files'].items():
        # Only fill in shas the in-memory view doesn't already have, and
        # never for excluded files.
        if key not in in_mem_shas['files'] and key not in dont_update_shas_of:
            in_mem_shas['files'][key] = sha
    return _drop_excluded(in_mem_shas)
def filter_punctuation(sentence, punctuation=':(\'-,%>.[?)"=_*];&+$@/|!<#`{~\}^'):
    """Strip unwanted punctuation characters from a string.

    Args:
        sentence (str): string which needs the punctuation filtered out
        punctuation (str): the punctuation characters which are unnecessary

    Returns:
        result (str): string without the unnecessary punctuation
    """
    # One-pass join over a generator replaces the manual append loop;
    # membership semantics are unchanged.
    return "".join(ch for ch in sentence if ch not in punctuation)
def hms2deg(h, m, s):
    """Convert an (hours, minutes, seconds) time-style angle to degrees.

    One hour of time equals 360/24 = 15 degrees, so minutes and seconds of
    time must also be scaled by 15.  Fixed: the original added ``m / 60``
    and ``s / 3600`` directly, which treats them as arc-minutes/arc-seconds
    of a degree rather than minutes/seconds of time.

    Args:
        h (int): hours
        m (int): minutes
        s (float): seconds

    Return:
        float: angle in degrees
    """
    return (h + m / 60.0 + s / 3600.0) * 360.0 / 24.0
def _fmt_rank(val): """Returns value (between 0 and 1) formatted as a percentage.""" return '%.5f' % (100 * val)
def _format_result(criterion_name, success, detail): """Returns the SLA result dict corresponding to the current state.""" return {"criterion": criterion_name, "success": success, "detail": detail}
def getattr(object, attribute, default_value=None):
    """Get a property of an object; ``attribute`` may be a dotted path,
    e.g. 'value' or 'member.value'.

    Security fix: the original built the lookup with ``eval``, which
    executes arbitrary code when ``attribute`` comes from untrusted input;
    ``operator.attrgetter`` resolves dotted attribute paths safely.

    When ``default_value`` is None the lookup error propagates, matching the
    original, which only guarded the lookup when a default was given.

    (Note: this function deliberately keeps its historical name, shadowing
    the builtin ``getattr`` at module level.)
    """
    from operator import attrgetter
    getter = attrgetter(attribute)
    if default_value is None:
        return getter(object)
    try:
        return getter(object)
    except Exception:  # was a bare except around the eval
        return default_value
def cleanup_code(content: str) -> str:
    """
    Automatically removes code blocks from the code.

    Parameters
    ----------
    content : str
        The content to be cleaned.

    Returns
    -------
    str
        The cleaned content.
    """
    # Fenced block (```py\n...\n```): drop the first and last lines.
    if content.startswith("```") and content.endswith("```"):
        lines = content.split("\n")
        return "\n".join(lines[1:-1])
    # Inline code: strip surrounding backticks and whitespace.
    return content.strip("` \n")
def evaluate_group(flags):
    """evaluate group will take a list of flags (eg: [True, "and", False, "or", True])
    And read through the logic to determine if the image result is to be
    flagged. This is how we combine a set of criteria in a group to come to
    a final decision.

    NOTE(review): ``flags`` is consumed (mutated) by the repeated pop(0)
    calls, so callers should not expect to reuse the list afterwards.
    Entries are boolean values optionally separated by the connective
    strings "and" / "or".
    """
    flagged = False
    first_entry = True
    # If it starts with and and/or, remove it
    if flags and flags[0] in ["and", "or"]:
        flags.pop(0)
    while len(flags) > 0:
        flag = flags.pop(0)
        if flag == "and":
            # Consume the operand following the connective and AND it in.
            flag = flags.pop(0)
            flagged = flag and flagged
        elif flag == "or":
            # Consume the operand following the connective and OR it in.
            flag = flags.pop(0)
            flagged = flag or flagged
        else:
            # If it's the first entry it seeds the result; any later bare
            # value (no explicit connective) is combined with AND.
            if first_entry is True:
                flagged = flag
            else:
                flagged = flagged and flag
            first_entry = False
    return flagged
def add_zeros_in_front_and_convert_to_string(number: int, order: int) -> str:
    """Zero-pad ``number`` to the width implied by ``order`` by adding them
    and dropping the leading digit of the sum.

    Usage: number=5, order=100 -> "05"

    :param number: int.
    :param order: int.
    :return: str.
    """
    shifted = number + order
    return str(shifted)[1:]
def nice_int(s):
    """Convert a string to int, tolerating float-style input like '1.0'
    (plain int('1.0') raises ValueError).

    @param s: string to be converted to integer
    """
    try:
        value = int(s)
    except ValueError:
        # Fall back to parsing as a float and truncating.
        value = int(float(s))
    return value
def DNAtoRNA(dna):
    """Transcribe a DNA string to RNA by replacing every 'T' with 'U'.

    (dna_to_rna would be PEP 8; camelCase is forced by CodeWars.)
    """
    return dna.translate(str.maketrans('T', 'U'))
def get_palette(num_cls):
    """
    Returns the color map for visualizing the segmentation mask.

    Args:
        num_cls: Number of classes

    Returns:
        The color map as a flat list of num_cls * 3 RGB byte values
    """
    n = num_cls
    palette = [0] * (n * 3)
    for j in range(0, n):
        lab = j
        palette[j * 3 + 0] = 0
        palette[j * 3 + 1] = 0
        palette[j * 3 + 2] = 0
        i = 0
        # Spread the bits of the label index across the three channels,
        # filling from the most significant bit downward (this appears to
        # follow the PASCAL VOC-style palette scheme -- confirm if exact
        # VOC colors matter).
        while lab:
            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    return palette
def _strip_header(g): """removes the GP header""" if g[:2] == b'GB': return g[8:] return g
def box(t, t_start, t_stop):
    """Box-shape (Theta-function): 0 before `t_start` and after `t_stop`,
    1 elsewhere.

    Args:
        t (float): Time point or time grid
        t_start (float): First value of `t` for which the box has value 1
        t_stop (float): Last value of `t` for which the box has value 1

    Note:
        You may use :class:`numpy.vectorize`, :func:`functools.partial`, or
        :func:`qutip_callback`, cf. :func:`flattop`.
    """
    return 1.0 if t_start <= t <= t_stop else 0.0
def transform_to_another_scale(x, a, b, c, d):
    """Linearly map a value ``x`` from the scale [a, b] onto the scale [c, d]."""
    scale = (d - c) / (b - a)
    return (x - a) * scale + c
def quantile(x, p):
    """Return the pth-quantile value in ``x`` (0 <= p <= 1).

    Fixed: the index is clamped so ``p == 1.0`` returns the maximum instead
    of raising IndexError (``int(1.0 * len(x))`` is one past the end).
    """
    p_index = min(int(p * len(x)), len(x) - 1)
    return sorted(x)[p_index]
def calc_PnPs(counts):
    """Compute the ratio of non-synonymous to synonymous substitution rates.

    counts = {'obs syn': #, 'pos syn': #, 'obs non-syn': #, 'pos non-syn': #}

    Returns 'n/a' when any denominator (possible sites, or the synonymous
    rate itself) is zero.
    """
    pos_non_syn = counts['pos non-syn']
    pos_syn = counts['pos syn']
    if pos_non_syn == 0 or pos_syn == 0:
        return 'n/a'
    non_syn_rate = float(counts['obs non-syn']) / float(pos_non_syn)
    syn_rate = float(counts['obs syn']) / float(pos_syn)
    if syn_rate == 0:
        return 'n/a'
    return non_syn_rate / syn_rate
def no_perf_degrad(optimzed_time: float, non_optimized_time: float, rtol: float = 5e-2) -> bool:
    """Assert no performance degradation.

    Args:
        optimzed_time (float): runtime of the optimized variant [sic name]
        non_optimized_time (float): runtime of the baseline variant
        rtol (float, optional): Max tolerant runtime degradation.
            Defaults to 5e-2 (the original docstring said 5e-3, which did
            not match the signature).

    NOTE(review): despite the name, this returns True only when the
    optimized time is LARGER than the baseline by more than rtol, i.e. it
    appears to detect a degradation rather than assert its absence --
    confirm against the call sites before relying on the name.
    """
    # relative_err = 1 - min/max, i.e. the relative gap between the two times.
    frac_down = max(non_optimized_time, optimzed_time)
    frac_up = min(non_optimized_time, optimzed_time)
    relative_err = 1 - frac_up / frac_down
    return optimzed_time > non_optimized_time and relative_err > rtol
def get_str_ip(list_ip):
    """Join a list of 4 integers into dotted IP address notation."""
    return ".".join(str(octet) for octet in list_ip)
def second_valid_range_str(second_valid_range):
    """Fixture helper that yields a "start-stop" string representation of a
    range within the bounds of the "second" field."""
    first = second_valid_range[0]
    last = second_valid_range[-1]
    return f'{first}-{last}'
def mode(nums):
    """Return the most-common number in the list.

    For this function there will always be a single most-common value, so
    ties need not be handled.

    >>> mode([1, 2, 1])
    1
    >>> mode([2, 2, 3, 3, 2])
    2

    Uses collections.Counter for a single O(n) pass instead of the original
    O(n^2) ``max(set(nums), key=nums.count)``.
    """
    from collections import Counter
    return Counter(nums).most_common(1)[0][0]
def n_rows_cols(pixel_index, block_size, rows_cols):
    """
    Adjusts block size for the end of image rows and columns.

    Args:
        pixel_index (int): The current pixel row or column index.
        block_size (int): The image block size.
        rows_cols (int): The total number of rows or columns in the image.

    Example:
        >>> n_rows = 5000
        >>> block_size = 1024
        >>> i = 4050
        >>> adjusted_block_size = n_rows_cols(i, block_size, n_rows)

    Returns:
        Adjusted block size as int.
    """
    if pixel_index + block_size < rows_cols:
        return block_size
    # Final (partial) block: only the remaining pixels fit.
    return rows_cols - pixel_index
def timerange_validate(BEGIN_DAY, BEGIN_MONTH, BEGIN_YEAR, END_DAY, END_MONTH, END_YEAR):
    """Validate the time range used to cut the log.

    Returns 0 for a valid range and 1 (after printing a diagnostic) when the
    end date precedes the begin date.
    """
    # Guard clauses replace the nested if/elif ladder; messages unchanged.
    if BEGIN_YEAR > END_YEAR:
        print("Invalid time range. END YEAR is lower than BEGIN YEAR")
        return 1
    if BEGIN_YEAR == END_YEAR:
        if BEGIN_MONTH > END_MONTH:
            print("Invalid time range. END MONTH is lower than BEGIN MONTH")
            return 1
        if BEGIN_MONTH == END_MONTH and BEGIN_DAY > END_DAY:
            print("Invalid time range. END DAY is lower than BEGIN DAY")
            return 1
    return 0
def _filter_compatible(inp, cls, attr, else_None=False): """Filter common data structures compatible with UFloat.""" if else_None: inp = tuple(getattr(x, attr) if isinstance(x, cls) else None for x in inp) else: inp = tuple(getattr(x, attr) if isinstance(x, cls) else x for x in inp) return inp
def format_line(data, section):
    """Concatenate data chunks, appending the IGES section letter and a
    1-based line counter to each 72-column padded line.

    :param data: list of values
    :type data: list
    :param section: letter for the corresponding IGES section
    :type section: string
    """
    lines = []
    for line_no, chunk in enumerate(data, start=1):
        lines.append("\n{:<72}{}{:7}".format(chunk, section, line_no))
    return "".join(lines)
def _process_upsert(existing, change): """ Process an I{UPSERT} change. """ # Replace whatever was there with whatever was specified. return change
def collect_first_sep(_, nodes):
    """Parse action used for: Elements = Elements "," Element;

    Appends the new element (when present) to a copy of the collected list;
    when the element is None the existing list is returned unchanged.
    """
    elements, _sep, element = nodes
    if element is None:
        return elements
    extended = list(elements)
    extended.append(element)
    return extended
def pixel2meter(x_in_pixel, m2p_ratio):
    """Convert a pixel measurement to world meters via the meter-to-pixel
    ratio (result is always a float)."""
    return x_in_pixel * 1.0 / m2p_ratio
def fix_output_name(name: str):
    """Drop the first "/Identity:0" fragment from a tensor's name, if present."""
    return name.replace("/Identity:0", "", 1)