content
stringlengths
42
6.51k
def product_name(d, i, r):
    """Return the item's product name, or '' when absent.

    :param d: Report definition (unused)
    :param i: Item definition dict
    :param r: Meter reading (unused)
    :return: Product name string
    """
    return i.get('product_name', '')
def rsa_encrypt(data: int, e: int, n: int) -> int:
    """Encrypt an integer with the textbook RSA cryptosystem
    (rsa_fernet_encrypt is more secure and supports more data).

    :param data: the plaintext (must be < n)
    :param e: public key (e) of the other person
    :param n: public key (n) of the other person
    :return: the ciphertext
    :raises OverflowError: if data >= n
    """
    # BUG FIX: the plaintext must be strictly below the modulus.  The
    # original only rejected data > n, but pow(n, e, n) == 0, which
    # irrecoverably destroys the plaintext when data == n.
    if data >= n:
        raise OverflowError('data must be smaller than the modulus n')
    return pow(data, e, n)
def absdiff(x, y, floor=0):
    """Absolute difference between x and y, flushed to 0 when below ``floor``.

    ``floor`` would typically be a small decimal value, e.g. 0.05.
    """
    diff = abs(x - y)
    return 0 if floor and diff < floor else diff
def get_argc(f):
    """Return the number of positional arguments declared by function ``f``."""
    return f.__code__.co_argcount
def is_owner(username, activity):
    """Return True when ``username`` owns ``activity``.

    Empty/None usernames never own anything.
    """
    return bool(username) and username == activity.username
def number_of_yang_modules_that_passed_compilation(in_dict: dict, position: int, compilation_condition: str):
    """Count modules whose compilation result matches ``compilation_condition``.

    Arguments:
        :param in_dict (dict) Dictionary of key:yang-model, value:list of compilation results
        :param position (int) 1-based position in the list where the condition is checked
        :param compilation_condition (str) Result we are looking for - PASSED, PASSED WITH WARNINGS, FAILED
        :return: the number of YANG models which meet the 'compilation_condition'
    """
    return sum(
        1 for results in in_dict.values()
        if results[position - 1] == compilation_condition
    )
def determinant_3x3(m):
    """Return the determinant of the 3x3 matrix ``m``.

    Cofactor expansion down the first column.
    """
    (a, b, c), (d, e, f), (g, h, i) = m
    return (a * (e * i - h * f)
            + d * (h * c - b * i)
            + g * (b * f - e * c))
def norm_type_dual(norm_type):
    """Return the name of the dual norm as a string."""
    special = {'linf': 'l1', 'l1': 'linf', 'dft1': 'dftinf', 'dftinf': 'dft1'}
    if norm_type in special:
        return special[norm_type]
    # General lp case: 1/p + 1/q = 1  =>  q = p / (p - 1)
    p = float(norm_type[1:])
    return 'l%g' % (p / (p - 1))
def split_part_key(key):
    """Split a standard part key of the form ``hpn:rev[:port]``.

    Parameters
    ----------
    key : str
        Standard part key as hpn:rev, optionally with a trailing :port.

    Returns
    -------
    tuple
        (hpn, rev) or (hpn, rev, port) when a port is present.
    """
    parts = key.split(":")
    hpn, rev = parts[0], parts[1]
    if len(parts) == 2:
        return hpn, rev
    return hpn, rev, parts[2]
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false).

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError for
    anything else.  Borrowed from distutils.util to avoid depending on the
    deprecated "imp" module.
    """
    lowered = val.lower()
    if lowered in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if lowered in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError("invalid truth value %r" % (val,))
def monthCode(month):
    """Convert a month number (1-12) to its futures month code, or back.

    Input: either month nr (int) or month code (str).
    Returns: code (str) or month nr (int).
    """
    codes = ('F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z')
    if isinstance(month, int):
        return codes[month - 1]
    if isinstance(month, str):
        return codes.index(month) + 1
    raise ValueError('Function accepts int or str')
def fill_outputs(gd): """ Fills the output lists of of a graph of ParsedNode Takes a graph in "dict{str, ParsedNode}" form, and returns a new graph. """ # fill outputs for k, v in gd.items(): for i in v.inputs: gd[i].outputs.append(v.name) for i in v.control_inputs: gd[i].control_outputs.append(v.name) get_tuple_ops = ['Split', 'SplitV'] for k, v in gd.items(): if v.op in get_tuple_ops: outputs = [[out, int(gd[out].attr['index'])] for out in v.outputs] outputs.sort(key=lambda x: x[1]) gd[k].outputs = [out for [out, _] in outputs] return gd
def parseAcceptHeader(value):
    """Parse an Accept header, ignoring any accept-extensions.

    Returns a list of tuples containing main MIME type, MIME subtype,
    and quality, sorted by descending quality.

    str -> [(str, str, float)]
    """
    chunks = [chunk.strip() for chunk in value.split(',')]
    accept = []
    for chunk in chunks:
        parts = [s.strip() for s in chunk.split(';')]
        mtype = parts.pop(0)
        if '/' not in mtype:
            # This is not a MIME type, so ignore the bad data
            continue
        main, sub = mtype.split('/', 1)
        # Look for a q=<float> parameter; break out once found.
        for ext in parts:
            if '=' in ext:
                k, v = ext.split('=', 1)
                if k == 'q':
                    try:
                        q = float(v)
                        break
                    except ValueError:
                        # Ignore poorly formed q-values
                        pass
        else:
            # for/else: no valid q parameter found, use default quality
            q = 1.0
        accept.append((q, main, sub))
    # Sort by quality (tuple starts with q), highest first.
    accept.sort()
    accept.reverse()
    return [(main, sub, q) for (q, main, sub) in accept]
def extract_ranges(index_list, range_size_limit=32):
    """Extract consecutive ranges and singles from index_list.

    Args:
      index_list: List of monotone increasing non-negative integers.
      range_size_limit: Largest size range to return. If a larger
        consecutive range exists it will be returned as multiple ranges.

    Returns:
      (ranges, singles) where ranges is a list of [first, last] pairs of
      consecutive elements in index_list, and singles is all of the other
      elements, in original order.
    """
    if not index_list:
        return [], []
    first = index_list[0]
    last = first
    ranges = []
    singles = []
    for i in index_list[1:]:
        # Extend the current run while values stay consecutive and the run
        # is under the limit.
        # NOTE(review): the span test is (last - first) <= limit, so a run
        # can grow to limit + 1 elements — confirm whether that off-by-one
        # is intended.
        if i == last + 1 and (last - first) <= range_size_limit:
            last = i
        else:
            # Close the current run: a [first, last] range if it has at
            # least two elements, otherwise a single.
            if last > first:
                ranges.append([first, last])
            else:
                singles.append(first)
            first = i
            last = i
    # Flush the final run.
    if last > first:
        ranges.append([first, last])
    else:
        singles.append(first)
    return ranges, singles
def remove_dupes(car_list):
    """Drop dicts with a duplicate 'title' from the list (first one wins)."""
    seen_titles = set()
    unique_cars = []
    for car in car_list:
        title = car['title']
        if title in seen_titles:
            continue
        seen_titles.add(title)
        unique_cars.append(car)
    return unique_cars
def parse_cluster_slots(resp):
    """Parse a CLUSTER SLOTS reply into {(start, end): (host, port)}.

    @see https://redis.io/commands/cluster-slots
    """
    return {
        (entry[0], entry[1]): tuple(entry[2][:2])
        for entry in resp
    }
def aqilevel(aqi):
    """Return AQI level (g,y,o,r,p,m)."""
    aqi = int(aqi)
    # Upper bound of each band, checked in ascending order.
    for bound, level in ((50, 'g'), (100, 'y'), (150, 'o'), (200, 'r'), (300, 'p')):
        if aqi <= bound:
            return level
    return 'm'
def expand_hex(x):
    """Expand shorthand hexadecimal code, e.g. c30 -> cc3300."""
    if len(x) != 3:
        return x
    return ''.join(ch * 2 for ch in x)
def dict_get_nested(d, keys, alt_ret):
    """Get a nested dictionary entry given a list of keys.

    Equivalent to d[keys[0]][keys[1]]...etc.

    Args:
        d (dict): nested dictionary to search.
        keys (list): keys to search one by one.
        alt_ret: returned if the specified item is not found.

    Returns:
        item matching the chain of keys in d, or ``alt_ret``.
    """
    item = d
    for k in keys:
        # BUG FIX: the original called .get() on whatever the previous
        # lookup returned — crashing with AttributeError when a key was
        # missing (alt_ret substituted) or an intermediate value was not a
        # dict.  Bail out with alt_ret instead.
        if not isinstance(item, dict):
            return alt_ret
        item = item.get(k, alt_ret)
    return item
def cygwin_to_windows_path(path):
    """Turn /cygdrive/c/foo into c:/foo, or return ``path`` unchanged if it
    is not a cygwin path."""
    prefix = '/cygdrive/'
    if not path.startswith(prefix):
        return path
    rest = path[len(prefix):]
    # Insert ':' after the single drive letter.
    return rest[:1] + ':' + rest[1:]
def merge_dicts(*dicts, **kwargs):
    """merge_dicts(*dicts, cls=None)

    Merge multiple *dicts* into a single dict.  Merging happens in order,
    so values from later dicts win on key collisions.  The class of the
    returned dict is *cls*; when *cls* is None it is inferred from the
    first dict object in *dicts*.  Non-dict arguments are skipped.
    """
    cls = kwargs.get("cls", None)
    if cls is None:
        # Infer the result class from the first real dict argument.
        cls = next((d.__class__ for d in dicts if isinstance(d, dict)), None)
        if cls is None:
            raise TypeError("cannot infer cls as none of the passed objects is of type dict")
    merged_dict = cls()
    for d in dicts:
        if isinstance(d, dict):
            merged_dict.update(d)
    return merged_dict
def single_line_version(value):
    """Ensure a version value is represented as a single-line string.

    Version entries may span multiple lines; collapse all whitespace runs
    into single spaces.

    Args:
        value: the value to extract a version from

    Returns:
        the single-line version string
    """
    tokens = str(value).split()
    return ' '.join(tokens)
def mann_whitney_u_r(m, n, u, memo=None):
    """
    Number of orderings in Mann-Whitney U test.

    The PMF of U for samples of sizes (m, n) is given by
    p(u) = r(m, n, u) / binom(m + n, m).

    References
    ----------
    .. [1] Mann & Whitney, Ann. Math. Statist. 18, 50 (1947).
    """
    if u < 0:
        # No orderings can yield a negative U statistic.
        value = 0
    elif m == 0 or n == 0:
        # With one sample empty, only U == 0 is achievable (one ordering).
        value = 1 if u == 0 else 0
    else:
        # Don't bother figuring out table construction, memoization
        # sorts it out
        if memo is None:
            memo = {}
        key = (m, n, u)
        value = memo.get(key)
        if value is not None:
            return value
        # Recurrence over which sample contributes the largest element.
        value = (mann_whitney_u_r(m, n - 1, u, memo)
                 + mann_whitney_u_r(m - 1, n, u - n, memo))
        memo[key] = value
    return value
def fibonacci(n):
    """Return the Nth Fibonacci number (1-indexed: fibonacci(1) == 0,
    fibonacci(2) == 1).

    Iterative implementation: O(n) time instead of the original naive
    recursion's exponential blow-up, and the original recursed forever
    for n < 1 — raise ValueError instead.

    Raises:
        ValueError: if n < 1.
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
def gndvi(nir, green):
    """Normalized Difference NIR/Green (Green NDVI).

    See: https://www.indexdatabase.de/db/i-single.php?id=401
    """
    difference = nir - green
    total = nir + green
    return difference / total
def chao1_uncorrected(observed, singles, doubles):
    """Calculate chao1 given counts (Eq. 1 in the EstimateS manual).

    Formula: chao1 = S_obs + N_1^2 / (2 * N_2) where N_1 and N_2 are the
    counts of singletons and doubletons respectively.

    Note: this is the original formula from Chao 1984, not bias-corrected.
    """
    return observed + (singles ** 2) / (2.0 * doubles)
def recvs(job, jobson, prec, recv):
    """Collect all necessary recvs for ``job``.

    A recv is needed for every predecessor that runs on a different worker
    than ``job`` itself.
    """
    if job not in prec:
        return []
    owner = jobson[job]
    return [recv(jobson[p], owner, p, job)
            for p in prec[job]
            if jobson[p] != owner]
def compare_peaks(peak1, peak2):
    """Compare two peaks by their start coordinate.

    Args:
        peak1: The first peak to be compared
        peak2: The second peak to be compared

    Returns:
        int: -1 for smaller, 0 for same, 1 for larger (relative to peak 1)
    """
    start1, start2 = peak1[0], peak2[0]
    # cmp-style result via boolean subtraction.
    return (start1 > start2) - (start1 < start2)
def get_account_id_from_image_uri(image_uri):
    """Find the AWS account ID where the image is located.

    :param image_uri: <str> ECR image URI (account is the first dotted part)
    :return: <str> AWS Account ID
    """
    account_id, _, _ = image_uri.partition(".")
    return account_id
def _keep_common_fields(feature, spec): """Delete the keys of feature that are not in spec.""" if not isinstance(feature, dict): return feature common_keys = set(feature.keys()) & set(spec.keys()) return { key: _keep_common_fields(feature[key], spec[key]) for key in common_keys }
def Apogee(velocity):
    """Predict the highest point in the rocket's trajectory from its
    instantaneous velocity: h = v^2 / (2 g).

    Args:
        velocity (float): current velocity of the Rocket

    Returns:
        float: highest predicted altitude (0 for downward velocity)
    """
    STANDARD_GRAVITY = 9.80665
    if velocity < 0:
        return 0
    return velocity ** 2 / (2 * STANDARD_GRAVITY)
def _get_calendar(var): """ Get the proper calendar string from a netcdf file Parameters ---------- var : netCDF4.variable Returns ------- calendar type: string, The type of calendar system used convert : bool True if the calendar needs to be converted to datetime """ # Set up the mapping for calendars default = 1 calendar_conv = [False, False, False, True, True, True, False, False, False] calendar_types = ['standard', 'gregorian', 'proleptic_gregorian', 'noleap', 'julian', 'all_leap', '365_day', '366_day', '360_day'] cals = {v: v for v in calendar_types} cals['gregorian_proleptic'] = 'proleptic_gregorian' # Load the calendar type. If it is incorrectly specified (*cough* ROMS), # change it for cal in ('calendar', 'calendar_type'): if hasattr(var, cal): cal = cals.get(str(getattr(var, cal)).lower(), calendar_types[default]) return cal, calendar_conv[calendar_types == cal] return calendar_types[default], calendar_conv[default]
def js_bool(value):
    """Render a Python truth value as a JavaScript literal.

    Similar to yesno:"true,false,false".
    """
    return "true" if value else "false"
def durationToShortText(seconds):
    """Convert seconds to a short user friendly string.

    Only the most significant units are kept: whole days alone ('1d'),
    otherwise hours+minutes ('2h30m'), hours alone, or seconds when there
    are no larger units.  Examples: 143 -> '2m', 9000 -> '2h30m'.
    """
    days = int(seconds / 86400)
    if days:
        return '{0}d'.format(days)
    remainder = seconds % 86400
    hour_count = int(remainder / 3600)
    hour_part = '{0}h'.format(hour_count) if hour_count else ''
    remainder %= 3600
    mins = int(remainder / 60)
    if mins:
        return hour_part + '{0}m'.format(mins)
    if hour_count:
        return hour_part
    secs = int(remainder % 60)
    if secs:
        return '{0}s'.format(secs)
    return '0s'
def parse_subject_from_openssl_output(raw_subject):
    """Parse the subject value from raw OpenSSL output.

    For reference, the openssl cmd formats differ between versions;
    OpenSSL 1.1.x: subject=C = CT, ST = "ST,Cs", L = "Locality, Locality", O = Internet Widgits Pty Ltd
    OpenSSL 1.0.x: subject= /C=US/ST=WA/L=Locality, Locality/O=Internet Widgits Pty Ltd

    Returns:
        string : The certificate subject.
    """
    after_marker = raw_subject.split("subject=")[1]
    return after_marker.strip()
def split(numbers):
    """Split only the left-most value >= 10 in a nested pair structure.

    A number n >= 10 is replaced by the pair [n // 2, ceil(n / 2)].
    Returns the (possibly new) pair plus a flag telling whether a split
    happened, so recursive callers stop after the first split.
    """
    [left, right] = numbers
    done = False
    if isinstance(left, int) and left >= 10 and not done:
        # -(-n // 2) is ceiling division without importing math
        left = [left // 2, -(-left // 2)]
        done = True
    elif isinstance(left, list) and not done:
        left, done = split(left)
    # Only attempt the right side if the left side did not already split.
    if isinstance(right, int) and right >= 10 and not done:
        right = [right // 2, -(-right // 2)]
        done = True
    elif isinstance(right, list) and not done:
        right, done = split(right)
    return [left, right], done
def get_name_dir(name):
    """Construct a reasonable directory name from a descriptive name.

    Replace spaces with dashes, convert to lower case and remove
    '-content' and '-repository' if present.
    """
    slug = name.lower().replace(' ', '-')
    for extra in ('-content', '-repository'):
        slug = slug.replace(extra, '')
    return slug
def last_index_of(array, value):
    """Return the last index of `value` in the given list, or -1 if it does
    not exist.  ``value`` may also be a predicate callable."""
    for idx in range(len(array) - 1, -1, -1):
        entry = array[idx]
        if entry == value or (callable(value) and value(entry)):
            return idx
    return -1
def add_vector(vector, increment, mod=False, bounds=(0, 0)):
    """Add two 2D vectors; optionally wrap each component modulo ``bounds``
    (modular arithmetic for wrapping)."""
    x = vector[0] + increment[0]
    y = vector[1] + increment[1]
    if mod:
        x %= bounds[0]
        y %= bounds[1]
    return [x, y]
def get_full_name(name: str, prefix: str = '', suffix: str = ''):
    """Return a full name given a base name plus prefix/suffix extensions.

    Parameters
    ----------
    name : str
        the base name.
    prefix : str
        the prefix (default='')
    suffix : str
        the suffix (default='')

    Returns
    -------
    full_name : str
        pieces joined with underscores; empty prefix/suffix are skipped.
    """
    pieces = [name]
    if prefix:
        pieces.insert(0, prefix)
    if suffix:
        pieces.append(suffix)
    return '_'.join(pieces)
def binarySearch(alist, item, start, end):
    """Recursive binary search over alist[start..end] (inclusive).

    O(log n).  Returns True when ``item`` is present in the ordered list.
    """
    if start > end:
        return False
    mid = (start + end) // 2
    pivot = alist[mid]
    if pivot == item:
        return True
    if pivot > item:
        return binarySearch(alist, item, start, mid - 1)
    return binarySearch(alist, item, mid + 1, end)
def update(Args, new_values, args_dict):
    """Overwrite parameters in ``args_dict`` with new values.

    NOTE: length of [Args] and [new_values] MUST be equal.

    Args:
        Args (list of strings): names of arguments to modify
        new_values (list): values to store under those names
        args_dict (dict): hash table with parameters and values

    Returns:
        the updated dictionary of parameters
    """
    for position, arg_name in enumerate(Args):
        args_dict[arg_name] = new_values[position]
    return args_dict
def has_duplicates(t):
    """Return True if any element appears more than once in sequence ``t``.

    Sorts a copy (the input is not modified) and looks for equal adjacent
    elements.
    """
    ordered = sorted(t)
    return any(a == b for a, b in zip(ordered, ordered[1:]))
def check_insert_data(obj, datatype, name):
    """Check validity of an object: False for None, TypeError for a wrong
    type, True otherwise."""
    if obj is None:
        return False
    if isinstance(obj, datatype):
        return True
    raise TypeError("{} must be {}; got {}".format(
        name, datatype.__name__, type(obj).__name__))
def get_first_author_last_name(authorString):
    """Return the last name of the first author of a normalised author
    string ('Last, First; Last2, First2'), or None for empty input."""
    if not authorString:
        return None
    first_author = authorString.split(';')[0]
    return first_author.split(",")[0]
def detect_protocol_and_get_its_field_names(packet, field_names):
    """Find the first protocol with defined field names in the packet.

    :param packet: Packet from the Packets Parser stage.
    :param field_names: Dictionary mapping known protocols to field names.
    :return: Field names of the first detected protocol, else None.
    """
    return next(
        (names for proto, names in field_names.items() if proto in packet),
        None,
    )
def compute_h_score(shed_words_freq_dict, shed_words_happs_dict, verbose=False):
    """Compute the happiness score from word frequencies and happiness values.

    param shed_words_freq_dict: dict of shed_word_ind -> shed_word_freq
    param shed_words_happs_dict: dict of shed_word_ind -> shed_word_happs
    return: a single happiness score (frequency-weighted mean happiness)
    """
    total_len = sum(shed_words_freq_dict.values())
    if verbose:
        print('(FUNC) LEN: {}'.format(total_len))
    # Per-word contribution to the total happiness score.
    contributions = {
        word_ind: shed_words_happs_dict[word_ind] * (freq / total_len)
        for word_ind, freq in shed_words_freq_dict.items()
    }
    if verbose:
        print('(FUNC) SHED WORDS CONTRIBUTIONS: {}'.format(contributions))
    return sum(contributions.values())
def rreplace(s, old, new, occurrence):
    """Search-and-replace a limited number of occurrences, working from the
    back of the string to the front.

    :param s: string to manipulate
    :param old: substring to search for
    :param new: substring to replace with
    :param occurrence: number of occurrences
    :return: Modified string.
    """
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
def isTestDevice(serial_device):
    """Determine if this instance is just a Test connection."""
    return serial_device == 'TEST'
def early_contract_cps(changepoints, thresh=30):
    """Count changepoints closer than ``thresh`` days to contract start.

    CPs are as produced by ruptures' model.predict(): a list of positive
    integers of days since contract start, the last being the final day.
    """
    return sum(1 for cp in changepoints if 0 < cp <= thresh)
def get_proper_str(str_option, end=False, size=43):
    """Shorten a string to ``size`` characters for display.

    :param str_option: string to shorten
    :param end: keep the end of the string visible (default: the beginning)
    :return: shortened string with '...' marking the removed part
    """
    if len(str_option) <= size:
        return str_option
    keep = size - 3
    if end:
        return '...%s' % str_option[-keep:]
    return '%s...' % str_option[:keep]
def merge(d_base, d_in, loc=None):
    """Adds leaves of nested dict d_in to d_base, keeping d_base where overlapping"""
    # NOTE(review): despite the docstring, scalar collisions are overwritten
    # with d_in's value (the `d_base[key] != d_in[key]` branch below) —
    # confirm which side is meant to win.
    loc = loc or []  # key path, only threaded through the recursion
    for key in d_in:
        if key in d_base:
            if isinstance(d_base[key], dict) and isinstance(d_in[key], dict):
                # Both nested dicts: merge recursively.
                merge(d_base[key], d_in[key], loc + [str(key)])
            elif isinstance(d_base[key], list) and isinstance(d_in[key], list):
                # Both lists: append d_in's items after d_base's.
                for item in d_in[key]:
                    d_base[key].append(item)
            elif d_base[key] != d_in[key]:
                d_base[key] = d_in[key]
        else:
            # New key: copy straight over.
            d_base[key] = d_in[key]
    return d_base
def get_tn(tp, fp, fn, _all):
    """True negatives: everything in ``_all`` not counted as TP, FP, or FN.

    Args:
        tp (Set[T]):
        fp (Set[T]):
        fn (Set[T]):
        _all (Iterable[T]):

    Returns:
        Set[T]
    """
    return set(_all).difference(tp, fp, fn)
def parsePosterLink(value):
    """Return the stripped poster link, or None for empty/None input."""
    return value.strip() if value else None
def weight_on_edge(data, zxing):
    """Interpolated zero-crossing position between samples zxing and zxing+1.

    This weight is independent of the cell_width, so even with a good
    estimate of the cell width this does not need to take it into account.
    """
    value_before = data[zxing]
    slope = data[zxing + 1] - value_before
    return -value_before / slope
def reverse_linked_list(head):
    """Reverse a singly linked list in place.

    Returns the head of the reversed list.
    """
    prev = None
    node = head
    while node:
        # Re-point the node backwards and advance in one tuple assignment.
        node.next_node, prev, node = prev, node, node.next_node
    return prev
def ewald_input(dat_file, short_cut, kspace_accuracy, dump_file, newton=True):
    """Create text for a LAMMPS Ewald input file.

    NOTE(review): LAMMPS input scripts are one command per line; the
    template below assumes that layout — confirm against the original file.
    """
    ntn = "on" if newton else "off"
    input_text = f"""# Ewald input file
newton {ntn}
units metal
atom_style charge
dimension 3
boundary p p p
read_data {dat_file}
pair_style coul/long {short_cut}
pair_coeff * *
kspace_style ewald {kspace_accuracy}
thermo_style one
dump 1 all custom 1 {dump_file} id type x y z fx fy fz
dump_modify 1 sort id
run 0
"""
    return input_text
def parseInt(s, ret=0):
    """Parse a value as int.

    Non-string inputs are converted directly with int(); strings must be an
    optional sign followed by ASCII digits, otherwise ``ret`` is returned.
    """
    if not isinstance(s, str):
        return int(s)
    if s:
        digits = s[1:] if s[0] in "+-" else s
        # Restrict to ASCII digits; str.isdigit() would accept e.g. '²'.
        if digits and all(ch in "0123456789" for ch in digits):
            return int(s)
    return ret
def get_disaggregated_fuel_of_reg(submodel_names, fuel_disagg, region):
    """Get disaggregated fuel for every submodel for one specific region.

    Arguments
    ---------
    submodel_names : iterable
        Names of all submodels
    fuel_disagg : dict
        Fuel per submodel for all regions
    region : str
        Region

    Returns
    -------
    dict : submodel name -> fuel for ``region``
    """
    return {
        submodel: fuel_disagg[submodel][region]
        for submodel in submodel_names
    }
def polygon2bbox(polygons):
    """Compute one axis-aligned bounding box per polygon.

    :param polygons: list of polygons, each a list of (x, y) points
    :return: list of [y_min, x_min, y_max, x_max] boxes (note the order —
        the original docstring claimed [x_min, y_max, w, h] but the code
        has always returned [y_min, x_min, y_max, x_max])
    """
    boxes = []
    for polygon in polygons:
        xs = [point[0] for point in polygon]
        ys = [point[1] for point in polygon]
        boxes.append([min(ys), min(xs), max(ys), max(xs)])
    return boxes
def replace(data, match, repl):
    """Replace the value of every key in ``match`` with ``repl``.

    Recursively walks dicts and lists until the data is neither.
    """
    if isinstance(data, dict):
        out = {}
        for key, value in data.items():
            out[key] = repl if key in match else replace(value, match, repl)
        return out
    if isinstance(data, list):
        return [replace(item, match, repl) for item in data]
    return data
def compute_area(box):
    """Compute the area of an axis-aligned box [x1, y1, x2, y2].

    Args:
        box: A list of coordinates.
    """
    width = box[2] - box[0]
    height = box[3] - box[1]
    return width * height
def get_s3_key_labels(trained_on):
    """Return the s3 key for the labels file of the given dataset."""
    return 'pretrained_models/image/{}/labels.json'.format(trained_on)
def _to_int(hex_digit): """turn an hexadecimal digit into a proper integer""" return int(hex_digit, 16)
def dequan(XYZ, t, a=40, b=1.833, d=0.16, e=0.65, p=55, dz=20):
    """The Dequan Li Attractor. x0 = (0.1,0,0)

    Returns the time derivatives (dx/dt, dy/dt, dz/dt); ``t`` is accepted
    for ODE-solver compatibility but unused (autonomous system).
    """
    x, y, z = XYZ
    dx_dt = a * (y - x) + d * x * z
    dy_dt = p * x + dz * y - x * z
    dz_dt = b * z + x * y - e * x ** 2
    return dx_dt, dy_dt, dz_dt
def safe_method(func):
    """Decorator marking a function as safe to use by the api."""
    setattr(func, 'safe_method', True)
    return func
def jinja_resilient_splitpart(value, index, split_chars=' - '):
    """Split a string and return one piece by index.

    Args:
        value (str): string to split
        index (int): index to return
        split_chars (str, optional): split characters. Defaults to ' - '.

    Returns:
        str: the requested piece; if the index is out of bounds the
        original string is returned.
    """
    pieces = value.split(split_chars)
    return pieces[index] if index < len(pieces) else value
def _get_dequant_var_name(var_name): """ get dequantized var name """ return var_name + '.dequantize'
def factorial(n: int) -> int:
    """Return the factorial of n, an exact integer >= 0

    >>> factorial(4)
    24
    >>> factorial(10)
    3628800
    >>> factorial(25)
    15511210043330985984000000
    >>> factorial(-2)
    Traceback (most recent call last):
        ...
    ValueError: n must be >= 0
    """
    if n < 0:
        raise ValueError("n must be >= 0")
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
def convert_none(x: float) -> int:
    """Helper to use 0 for None values; numeric strings are parsed first."""
    if isinstance(x, str):
        x = float(x)
    return 0 if x is None else int(x)
def get_command_line(pid):
    """
    Given a PID, use the /proc interface to get the full command line for
    the process.  Return an empty string if the PID doesn't have an entry
    in /proc.
    """
    cmd = ''
    try:
        # /proc/<pid>/cmdline separates argv entries with NUL bytes.
        # FIX: dropped the redundant fh.close() — the with-block already
        # closes the file.
        with open('/proc/%i/cmdline' % pid, 'r') as fh:
            cmd = fh.read()
        cmd = cmd.replace('\0', ' ')
    except IOError:
        pass
    return cmd
def _bytes_to_int32(b): """Convert a bytes object containing four bytes into an integer.""" return b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)
def generate_fibonacci_sequence(number_of_terms=2, first_term=0, second_term=1):
    """Generate a Fibonacci sequence.

    :param int number_of_terms: how many terms to generate, including the
        first and second (must be >= 2)
    :param int first_term: first number in the sequence, must be >= 0
    :param int second_term: second number, must be >= first_term
    :return [int]: Fibonacci sequence
    """
    try:
        if number_of_terms < 2:
            raise ValueError("Number of terms must be >= 2")
        if first_term < 0:
            raise ValueError("First term must be >= 0")
        if second_term < first_term:
            raise ValueError("Second term must be >= first term")
        sequence = [first_term, second_term]
        while len(sequence) < number_of_terms:
            sequence.append(sequence[-1] + sequence[-2])
        return sequence
    except TypeError:
        # Comparisons above raise TypeError for non-numeric input.
        raise TypeError("Input parameters must be positive integers")
def create_file(file_name):
    """Create (or truncate to) an empty file; returns None."""
    with open(file_name, "w"):
        pass
def countBy(key, iterable):
    """countBy(key: function, iter: iterable)

    Count elements grouped by the value of ``key(x)``.

    args: key = lambda x: x>0, iter = [0,1,1,2]
    return: {False:1, True:3}
    """
    counts = {}
    for item in iterable:
        bucket = key(item)
        if bucket in counts:
            counts[bucket] += 1
        else:
            counts[bucket] = 1
    return counts
def is_cutg_species_label(x):
    """Return True when ``x`` looks like a CUTG label line (contains ':')."""
    return x.find(':') >= 0
def rotate_layer(layer: tuple, rotation_diff: int) -> tuple:
    """Rotate a layer tuple by the given rotation angle.

    Element 2 is the rotation; element 3 the distribution.  'Normal'
    distributions wrap at 180 degrees, everything else at 360.
    """
    _, _, rotation, distribution = layer
    period = 180 if distribution == "Normal" else 360
    updated = list(layer)
    updated[2] = (rotation + rotation_diff) % period
    return tuple(updated)
def get_n_keys(dic, titles, curr_n, max_n):
    """Return ``titles`` plus up to ``max_n`` additional keys from ``dic``.

    :param dic: word -> frequency dictionary to draw extra keys from
    :param titles: keys that must always be included (and never duplicated)
    :param curr_n: unused (kept for interface compatibility)
    :param max_n: number of most frequent word requested (default max_dict_length)
    :return: array of word
    """
    res = list(titles)
    added = 0
    for w in dic.keys():
        # BUG FIX: the original computed w.strip() and discarded the
        # result (a no-op statement); actually use the stripped key.
        w = w.strip()
        if w not in titles:
            res.append(w)
            added += 1
            if added >= max_n:
                break
    return res
def clean_data(db_connection, run_id, run_data):
    """Keep only datasource-related datapoints.

    Schemas whose name does not start with 'datasource_' are ignored;
    ``db_connection`` and ``run_id`` are accepted for interface
    compatibility but unused here.
    """
    return [
        datapoint for datapoint in run_data
        if datapoint["schema_name"].startswith("datasource_")
    ]
def mute_string(text):
    """
    Replace contents with 'xxx' to prevent syntax matching.

    >>> mute_string('"abc"')
    '"xxx"'
    >>> mute_string("'''abc'''")
    "'''xxx'''"
    >>> mute_string("r'abc'")
    "r'xxx'"
    """
    # Assume one quote character on each side, then widen for prefixes
    # and triple quotes.
    start = 1
    end = len(text) - 1
    # String modifiers (e.g. u or r): skip the prefix up to the first quote
    if text.endswith('"'):
        start += text.index('"')
    elif text.endswith("'"):
        start += text.index("'")
    # Triple quotes: two more quote characters on each side
    if text.endswith('"""') or text.endswith("'''"):
        start += 2
        end -= 2
    return text[:start] + 'x' * (end - start) + text[end:]
def binstr_int(v):
    """Parse a binary digit string (e.g. '1010') into an int."""
    return int(v, base=2)
def i_option(label, value):
    """Create an "option item" used by the select `options` field.

    Parameters
    ----------
    label : str
        Item label the User sees
    value : str
        Item value provided in the dialog submission

    Returns
    -------
    dict
    """
    return {'label': label, 'value': value}
def bubblesort(x):
    """
    Bubble sort: iterate through the list n-1 times, swapping adjacent
    neighbors when they are out of order.  Sorts ``x`` in place.

    Returns:
        (x, assignments, conditionals): the sorted list plus the number of
        element assignments and comparisons performed.
    """
    assignments = 0
    conditionals = 0
    length = len(x)
    if length < 2:
        return x, 0, 0
    for iteration in range(length - 1):
        for index in range(length - 1):
            conditionals += 1
            # BUG FIX: strict comparison — the original used >=, which
            # swapped equal neighbors, doing pointless work and inflating
            # the assignment count.
            if x[index] > x[index + 1]:
                assignments += 2
                x[index], x[index + 1] = x[index + 1], x[index]
    return x, assignments, conditionals
def command_filename(name):
    """Return the base filename for a command (lowercase, dash-separated)."""
    return '-'.join(name.lower().split(' '))
def reduceCombinations(currentCombos, nextElementList):
    """Extend each combination with matching elements from nextElementList.

    A candidate matches when its first two digits equal the last two digits
    of the combination's final element; the extended combinations are
    returned.
    """
    expanded = []
    for combo in currentCombos:
        tail = str(combo[-1])[-2:]
        for candidate in nextElementList:
            if str(candidate)[:2] == tail:
                expanded.append(combo + [candidate])
    return expanded
def adamatch_hyperparams(lr=1e-3, tau=0.9, wd=5e-5, scheduler=False):
    """Return a dictionary of hyperparameters for the AdaMatch algorithm.

    Defaults are the best values found via hyperparameter search.

    Arguments:
        lr: float, learning rate.
        tau: float, weight of the unsupervised loss.
        wd: float, weight decay for the optimizer.
        scheduler: bool, use a OneCycleLR learning rate scheduler if True.

    Returns:
        dict that can be passed to the `hyperparams` argument on AdaMatch.
    """
    return {
        'learning_rate': lr,
        'tau': tau,
        'weight_decay': wd,
        'cyclic_scheduler': scheduler,
    }
def to_warp_func(parameter1, parameter2, parameter3):
    """ Test function. <process> <input name="parameter1" type="float" desc="a parameter."/> <input name="parameter2" type="string" desc="a parameter."/> <input name="parameter3" type="int" desc="a parameter."/> <return> <output name="output1" type="float" desc="an output."/> <output name="output2" type="string" desc="an output."/> </return> </process> """
    # NOTE(review): the XML in the docstring looks like a machine-parsed
    # process specification — kept byte-identical.  The parameters are
    # ignored; fixed outputs are returned.
    output1 = 1
    output2 = "done"
    return output1, output2
def is_like_list(something):
    """Check if ``something`` is iterable but not a string."""
    return (not isinstance(something, str)) and hasattr(something, "__iter__")
def _determine_if_event(four_byte_int): """Checks if event bit is set.""" return four_byte_int >> 31 == 0
def _round_to_4(v): """Rounds up for aligning to the 4-byte word boundary.""" return (v + 3) & ~3
def xpath_query(xpath):
    """Return ``xpath`` if valid; raise ValueError for None/empty/blank."""
    if not xpath or xpath.isspace():
        raise ValueError("XPath query must not be null or empty string.")
    return xpath
def is_eulerian_tour(graph, tour):
    """Test whether ``tour`` is an Eulerian tour of the graph.

    :param graph: directed graph in listlist format, cannot be listdict
    :param tour: vertex list
    :returns: True if the tour uses each arc exactly once and every arc exists
    :complexity: `O(|V|*|E|)` assuming constant-time set membership
    """
    edge_count = len(tour) - 1
    arcs = {(tour[i], tour[i + 1]) for i in range(edge_count)}
    # A repeated arc collapses in the set, so the sizes would differ.
    if len(arcs) != edge_count:
        return False
    return all(v in graph[u] for (u, v) in arcs)
def solution(K, A):
    """
    DINAKAR
    Count maximal non-overlapping windows of A whose running sum reaches K:
    keep accumulating items until the sum reaches K, then reset the window
    and increment the found count.
    """
    # FIX: removed the leftover debug print statements from the original.
    window_sum = 0
    count = 0
    for value in A:
        if window_sum + value >= K:
            count += 1
            window_sum = 0
        else:
            window_sum += value
    return count
def _as_hex_str(bytes, deliminator): """ Pretty-prints bytes as hex values separated by a deliminator """ return deliminator.join(map(lambda x: '%02x' % ord(x), bytes))
def find_property(s, key='name', separator=':', delims=(' ', ',', ';')):
    """
    Find a property of the form key+separator+value in a string.

    Example:
        string = 'ax safsf name:QUAD01, ' should return 'QUAD01'

    Returns None when the key+separator pattern is absent.

    FIX: the default for ``delims`` is now a tuple — the original used a
    mutable list default (shared across calls anti-pattern).
    """
    match = key + separator
    ix = s.find(match)
    if ix == -1:
        return None
    # Take everything after the match, then cut at each delimiter in turn.
    value = s[ix + len(match):]
    for d in delims:
        value = value.split(d)[0]
    return value
def list_get(l, idx, default=None):
    """Get from a list with an optional default value.

    Note: falsy entries (0, '', None, ...) also yield ``default``, matching
    the original truthiness check.
    """
    try:
        value = l[idx]
    except IndexError:
        return default
    return value if value else default
def find_delimiter(data):
    """Find and return the delimiter used in the given data.

    Parameters:
        data (str or bytes): data whose header line is inspected

    Returns:
        (string): the candidate delimiter occurring most often in the
        first line (ties broken by candidate order: ',', ';', tab)
    """
    if isinstance(data, str):
        header = data.split("\n")[0]
    else:
        header = data.decode("utf-8").split("\n")[0]
    counts = {}
    for delim in (",", ";", "\t"):  # Removed: "\s", "|"
        counts[delim] = sum(1 for ch in header if ch == delim)
    best = max(counts.values())
    return [d for d, c in counts.items() if c == best][0]
def stripNewLinesFromStartAndEnd(string):
    """String -> String

    Strip whitespace (including newlines) from both ends; equivalent to
    str.strip() despite the name suggesting newlines only.
    """
    return string.strip()
def list_arg(raw_value):
    """argparse type for a comma-separated list of strings."""
    text = str(raw_value)
    return text.split(",")
def extract_transformations(events: list) -> list:
    """Extract the transformation types from the 'events' json objects,
    skipping entries typed "UNIMPORTANT"."""
    transformations = []
    for event in events:
        event_type = event["type"]
        if event_type != "UNIMPORTANT":
            transformations.append(event_type)
    return transformations