content
stringlengths
42
6.51k
def is_valid_file(ext, argument):
    """Check whether file extension *ext* is accepted for the given *argument* name."""
    accepted = {
        'input_dataset_path': ['csv'],
        'output_model_path': ['pkl'],
        'output_dataset_path': ['csv'],
        'output_results_path': ['csv'],
        'input_model_path': ['pkl'],
        'output_test_table_path': ['csv'],
        'output_plot_path': ['png'],
    }
    return ext in accepted[argument]
def p64(x):
    """Pack an integer into an 8-byte little-endian byte string."""
    import struct
    # Mask wraps negative / oversized values into the unsigned 64-bit range.
    masked = x & 0xFFFFFFFFFFFFFFFF
    return struct.pack('<Q', masked)
def index_table_from_name_table(name_table):
    """Convert a Cayley table of element names into one of position indices.

    Assumes the first element of the first row is the identity element, so
    the top row lists every element exactly once. Used by the Group
    constructor.

    Parameters
    ----------
    name_table : list
        Square list-of-lists of element-name strings (a multiplication table).

    Returns
    -------
    list
        The same table with each name replaced by its index in the top row.
    """
    header = name_table[0]
    indexed = []
    for row in name_table:
        indexed.append([header.index(name) for name in row])
    return indexed
def make_freq_table(data):
    """Return a frequency table (element -> count) from the elements of data.

    >>> d = make_freq_table([1.5, 2.5, 1.5, 0.5])
    >>> sorted(d.items())
    [(0.5, 1), (1.5, 2), (2.5, 1)]
    """
    counts = {}
    for item in data:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
def lower(value):  # Only one argument.
    """Template filter: convert a string to all lowercase.

    How to Use {{ value|lower|lower|.... }}
    """
    return value.lower()
def sum(values):
    """Return the sum of all values in a list.

    Args:
        values (list)

    Returns:
        float: The sum of all values (0 for an empty list).
    """
    result = 0
    for item in values:
        result = result + item
    return result
def _inner(i):
    """Print the first palindromic product of two i-digit factors.

    Scans factor pairs (j, k), j descending and k descending with k > j,
    and prints `i` and `product % 1337` for the first palindromic product
    encountered. Returns None (the value of print) in every case.
    """
    limit = int('9' * i)
    for j in range(limit, 0, -1):
        for k in range(limit, j, -1):
            ans = j * k
            text = str(ans)
            # Bug fix: the original compared against str(reversed(text)),
            # which is the repr of a reversed-iterator object and can never
            # equal text, so the palindrome branch never fired.
            if text == text[::-1]:
                return print(i, ans % 1337)
def _count_duplicated_locations(locations, new):
    """Count how many entries in *locations* duplicate the *new* location.

    Two locations are duplicates when both their 'url' and 'metadata'
    fields are equal.

    :param locations: The existing subject location set
    :param new: The new subject location
    :returns: The count of duplicated locations
    """
    duplicates = 0
    for existing in locations:
        same_url = existing['url'] == new['url']
        same_meta = existing['metadata'] == new['metadata']
        if same_url and same_meta:
            duplicates += 1
    return duplicates
def valmap(fn, d):
    """Apply a function to every value of a dictionary, keeping keys.

    >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
    >>> valmap(sum, bills)  # doctest: +SKIP
    {'Alice': 65, 'Bob': 45}

    See Also:
        keymap
    """
    return {key: fn(value) for key, value in d.items()}
def _adjacent(imp1, imp2):
    """Determine if two implicants are adjacent, i.e. differ on only one variable.

    Args:
        imp1 (string): implicant 1
        imp2 (string): implicant 2

    Returns:
        The merged implicant string (with '2' marking the differing
        position) when the implicants differ in at most one position,
        False when they differ in two or more positions.
        NOTE(review): despite the name, identical implicants (zero
        differences) also return the merged string, not False -- confirm
        callers never pass equal implicants.
    """
    differences = 0
    match = []
    for m1, m2 in zip(imp1, imp2):
        if m1 == m2:
            match.append(m1)
        elif differences:
            # A second differing position means the implicants are not adjacent.
            return False
        else:
            differences += 1
            match.append('2')  # '2' marks the eliminated variable
    return "".join(match)
def params_for(prefix, kwargs):
    """Extract parameters that belong to a given sklearn module prefix from
    ``kwargs``.

    Useful to obtain parameters that belong to a submodule.

    Examples
    --------
    >>> kwargs = {'encoder__a': 3, 'encoder__b': 4, 'decoder__a': 5}
    >>> params_for('encoder', kwargs)
    {'a': 3, 'b': 4}
    """
    full_prefix = prefix if prefix.endswith('__') else prefix + '__'
    cut = len(full_prefix)
    extracted = {}
    for key, val in kwargs.items():
        if key.startswith(full_prefix):
            extracted[key[cut:]] = val
    return extracted
def merge_dictionaries(default_dictionary, user_input_dictionary, path=None):
    """Shallow-merge user_input_dictionary into default_dictionary.

    Default values are overwritten by the user's input. Neither input dict
    is mutated; a new dict is returned. (`path` is kept for backward
    compatibility and is unused.)
    """
    merged = dict(default_dictionary)
    merged.update(user_input_dictionary)
    return merged
def width2height(bbox, width):
    """Get optimized height for a given width regarding a known bbox.

    *bbox* is (x1, y1, x2, y2); the height preserves the bbox aspect ratio.
    """
    dx = bbox[2] - bbox[0]
    dy = bbox[3] - bbox[1]
    return int(width * dy / dx)
def formatted_bytes(bytes_number):
    """Format a byte count as a human-readable string (TB/GB/MB/kB/bytes)."""
    # Largest units first; one decimal for scaled units.
    for power, unit in ((4, 'TB'), (3, 'GB'), (2, 'MB')):
        threshold = 1024 ** power
        if bytes_number >= threshold:
            return f"{bytes_number / threshold:.1f} {unit}"
    if bytes_number >= 1024:
        return f"{bytes_number / 1024:.1f} kB"
    return f"{bytes_number:.0f} bytes"
def index(l_, i):
    """Return a tuple of elements of *l_* selected by the indexes in *i*.

    >>> index([1,2,3],(0,2))
    (1, 3)
    """
    return tuple(l_[pos] for pos in i)
def _remove_duplicate_file_types(tokens):
    """Remove duplicate (and empty) tokens from a space-separated string.

    :param tokens: space-separated token string, e.g. "csv txt csv"
    :return: the same tokens with duplicates and empties dropped, first
        occurrence order preserved.
    """
    seen = []
    for item in tokens.split(" "):
        # Fixes two bugs in the original:
        # - `item is not ""` compared identity, not equality; use `!=`.
        # - `item not in unique_tokens` tested substring membership in the
        #   accumulated string, wrongly dropping tokens contained in others.
        if item != "" and item not in seen:
            seen.append(item)
    return " ".join(seen)
def str_to_bool(str_):
    """Convert the strings "True"/"False" to their bool values.

    Raises TypeError for any other input.
    """
    if str_ == "True":
        return True
    if str_ == "False":
        return False
    raise TypeError(str_)
def make_row(unique_row_id, row_values_dict):
    """Build a BigQuery-style insert row.

    row_values_dict is a dictionary of column name and column value.
    """
    row = {}
    row['insertId'] = unique_row_id
    row['json'] = row_values_dict
    return row
def _extract_intel_mpi(version_buffer_str):
    """Extract the version part of a typical Intel MPI library banner, e.g.:

        Intel(R) MPI Library 2019 Update 6 for Linux* OS

    returns the text between "Library" and "for" (surrounding spaces kept).
    """
    after_library = version_buffer_str.split("Library", 1)[1]
    return after_library.split("for", 1)[0]
def unique(alist):
    """Return the unique elements of a list, first-occurrence order preserved.

    Taken from comments in
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
    """
    seen = {}
    result = []
    for item in alist:
        if item not in seen:
            seen[item] = item
            result.append(item)
    return result
def string2array(line):
    """Convert a sudoku solution string 123456789123456789... into a
    newline-terminated string of 9-digit rows: 123456789\n123456789\n...
    """
    chunks = [line[start:start + 9] for start in range(0, len(line), 9)]
    return ''.join(chunk + '\n' for chunk in chunks)
def getDirectionFromDelta(delta):
    """Map an (dx, dy) delta pair to its direction code; raise ValueError
    for any unexpected delta.
    """
    mapping = (
        ((1, 1), 2),
        ((1, -1), 4),
        ((-1, -1), 6),
        ((-1, 1), 8),
    )
    for candidate, direction in mapping:
        if delta == candidate:
            return direction
    error_template = "Unexpected delta value of: {0}"
    raise ValueError(error_template.format(delta))
def image(image_url, alt_text, **kwargs):
    """Create an "Image" block.

    Parameters
    ----------
    image_url : str
    alt_text : str

    Notes
    -----
    API: https://api.slack.com/reference/messaging/blocks#image

    Other Parameters
    ----------------
    title : text object
    block_id : str
    """
    block = {'type': 'image', 'image_url': image_url, 'alt_text': alt_text}
    block.update(kwargs)
    return block
def set_nested_value(d: dict, value: dict):
    """Change or, if not present, inject *value*'s entries into the nested
    dictionary *d*, recursing into sub-dicts.

    :param d: e.g. {key: {key_2: value, key_1: 1234}}
    :param value: e.g. {key: {key_2: different_value}}
    :return: *d*, mutated in place (or *value* itself when it is not a dict)
    """
    if not isinstance(value, dict):
        return value
    for key, sub in value.items():
        # Recurse only when both sides hold dicts; otherwise overwrite.
        if key in d and isinstance(d[key], dict):
            d[key] = set_nested_value(d[key], sub)
        else:
            d[key] = sub
    return d
def generate_single_mappings(fields, diff_data, current_data):
    """Generate single mappings.

    For every field present in diff_data, compares the diff entry against
    the current entry ({"id": ..., "type": ...} dicts): unchanged entries
    are skipped, a None diff id maps to None, and a new/changed id maps to
    the diff entry. Consumes (pops) the handled keys from diff_data.
    """
    empty = {"id": None, "type": None}
    mappings = {}
    for field in fields:
        name = field.name
        if name not in diff_data:
            continue
        current = current_data.get(name) or dict(empty)
        new = diff_data.pop(name, None) or dict(empty)
        if current == new:
            continue
        if new["id"] is None:
            mappings[name] = None
        elif current["id"] is None or new["id"] != current["id"]:
            mappings[name] = new
    return mappings
def axis_list(n_axes: int, reverse=False) -> str:
    """Return a comma-separated list of axis TypeVar short names.

    Args:
        n_axes: Maximum number of axes to include; n_axes=1 -> 'A1',
            n_axes=2 -> 'A1, A2', etc.
        reverse: If False, count up from A1 to An; if True, count down.

    Returns:
        The comma-separated axis list, e.g. axis_list(2) -> 'A1, A2'.
    """
    indices = list(range(1, n_axes + 1))
    if reverse:
        indices.reverse()
    return ', '.join(f'A{i}' for i in indices)
def get_p9_value(my_list):
    """Return the value at the 90th-percentile position of the sorted list.

    The input list is not modified.
    """
    ordered = sorted(my_list)
    position = int(len(ordered) * 0.9)
    return ordered[position]
def _create_cummin_perm(axis, x_shape):
    """Build a permutation tuple that swaps *axis* with axis 0.

    Ensures axis is in [-len(x_shape), len(x_shape) - 1].

    :param axis: int axis to bring to the front (negative allowed)
    :param x_shape: shape tuple of the input tensor
    :return: tuple permutation of range(len(x_shape)) with axis and 0 swapped
    :raises TypeError: if axis is not an int
    :raises ValueError: if axis is out of the valid range
    """
    len_axis = len(x_shape)
    if not isinstance(axis, int):
        raise TypeError(f"The date type of 'axis' should be Int, but got {axis}.")
    # Bug fix: the original accepted axis == len_axis (`axis > len_axis`),
    # which then crashed with IndexError on prem[axis]; the valid upper
    # bound is len_axis - 1.
    if axis < -len_axis or axis >= len_axis:
        raise ValueError(f"The value of axis should be in [{-len_axis}, {len_axis - 1}], but got {axis}.")
    prem = [i for i in range(len_axis)]
    if axis < 0:
        axis = axis + len_axis
    prem[0], prem[axis] = axis, 0
    prem = tuple(prem)
    return prem
def val_default(val, default=True):
    """Return *val*, or *default* when *val* is None (for attributes)."""
    return default if val is None else val
def DMStoN(degrees, minutes, seconds):
    """Get a decimal number from degrees, minutes and seconds."""
    fraction = float(minutes) / 60.0 + float(seconds) / 3600.0
    return degrees + fraction
def getDataSetUuid(xmlfile):
    """
    Quickly retrieve the uuid from the root element of a dataset XML file,
    using a streaming parser to avoid loading the entire dataset into memory.
    Returns None if the parsing fails.
    """
    try:
        # Bug fix: xml.etree.cElementTree was removed in Python 3.9, so the
        # import raised ImportError and this function always returned None.
        # Plain ElementTree uses the C accelerator automatically.
        import xml.etree.ElementTree as ET
        for event, element in ET.iterparse(xmlfile, events=("start",)):
            # Only the root element is needed: return on the first event.
            return element.get("UniqueId")
    except Exception:
        return None
def sqrt_btn(value):
    """Return the square root of the current value, or "ERROR" for
    negative or non-numeric input.
    """
    try:
        x = float(value)
        # Bug fix: the original used `x > 0`, returning "ERROR" for 0 even
        # though sqrt(0) == 0.0 is well defined.
        if x >= 0:
            return x ** (1 / 2)
        return "ERROR"
    except ValueError:
        return "ERROR"
def _human_to_bytes(size_str):
    """Return the size in bytes for a human-readable string like "2 MB".

    Supported formats: KB, MB, GB, TB, PB, EB, ZB, YB
      KB to bytes = (* 1024)               == << 10
      MB to bytes = (* 1024 * 1024)        == << 20
      GB to bytes = (* 1024 * 1024 * 1024) == << 30

    :param size_str: string of the form "<number> <unit>"
    :return: int number of bytes
    :raises ValueError: for an unknown unit
    """
    convert = {'KB': 1, 'MB': 2, 'GB': 3, 'TB': 4, 'PB': 5, 'EB': 6, 'ZB': 7, 'YB': 8}
    s = size_str.split()
    if s[1] not in convert:
        # Bug fix: `raise "..."` (a string) is itself a TypeError in
        # Python 3 -- raise a proper exception type instead.
        raise ValueError("unknown size format" + size_str)
    return int(s[0]) << (10 * convert[s[1]])
def C_filter(line):
    """Internal filter: replace each tab character with 8 spaces."""
    eight_spaces = ' ' * 8
    return line.replace('\t', eight_spaces)
def _add_arbitrary_instruction(instruction, **kwargs):
    """Return *instruction* prefixed with a user-defined-instruction comment."""
    header = "# User-defined instruction\n"
    return header + instruction
def __parseWord(word):
    """Parse a single token from a tablefile.

    Returns None for a blank token, the unquoted contents for a
    double-quoted token, a float for a numeric token, and the raw
    (stripped) string otherwise.
    """
    word = word.strip()
    if len(word) == 0:
        return None
    # Quoted token: return the contents verbatim, never parsed as a number.
    if word[-1] == '"' and word[0] == '"':
        return word[1:-1]
    try:
        # Narrowed from a bare `except:` -- float() on a str can only raise
        # ValueError; also dropped the redundant float(ret) re-conversion.
        return float(word)
    except ValueError:
        return word
def rcusetup_args2cmds(bits, attenuation, mode=None):
    """Convert RCU arguments to rspctl command lines."""
    cmds = ["rspctl --bitmode=" + str(bits)]
    if attenuation:
        # NOTE attenuation only set when beamctl is runnning.
        cmds.append("rspctl --rcuattenuation=" + str(attenuation))
    if mode:
        cmds.append("rspctl --mode=" + str(mode))
    return cmds
def seed(seed, row, col, field):
    """Place life (or empty) at (row, col), returning a NEW field.

    The input *field* is not mutated; rows other than *row* are shared.
    """
    updated_row = field[row][:col] + [seed] + field[row][col + 1:]
    return field[:row] + [updated_row] + field[row + 1:]
def round_filters(filters, width_coefficient, depth_divisor, min_depth):
    """Scale *filters* by the width multiplier and round to a multiple of
    *depth_divisor*, never below *min_depth* (or the divisor when min_depth
    is falsy) and never more than 10% below the scaled value.
    """
    scaled = filters * width_coefficient
    floor = min_depth or depth_divisor
    # Round to the nearest multiple of depth_divisor.
    nearest = int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor
    result = max(floor, nearest)
    if result < 0.9 * scaled:  # prevent rounding down by more than 10%
        result += depth_divisor
    return int(result)
def twilights(twilight_evening, twilight_morning, obs_windows, verbose = False):
    """
    Confine observation timing constraints within nautical twilights.

    Parameters
    ----------
    twilight_evening : '~astropy.tot_time.core.Time' array
        Evening twilight tot_time for scheduling period (UTC)

    twilight_morning : '~astropy.tot_time.core.Time' array
        Morning twilight tot_time for scheduling period (UTC)

    obs_windows : list of '~astropy.tot_time.core.Time' pairs, or None
        Observation timing window tot_time-pairs in UTC.
        Each observation can have any number of tot_time windows.

    verbose : bool
        Print per-night / per-window diagnostics.

    Returns
    -------
    new_windows : list of lists of '~astropy.tot_time.core.Time' pairs or None
        New list of tot_time windows constrained within twilights.
        None when obs_windows is None/empty or no window overlaps any night.
    """
    new_windows = []
    if obs_windows is not None and len(obs_windows) != 0:
        for i in range(len(twilight_evening)):  # cycle through nights
            if verbose:
                print('\ntwilights: ', twilight_evening[i].iso, twilight_morning[i].iso)
            for j in range(len(obs_windows)):  # cycle through tot_time windows
                if verbose:
                    print('time_const[' + str(j) + ']:', obs_windows[j][0].iso, obs_windows[j][1].iso)
                # save tot_time window if there is overlap with schedule period
                if obs_windows[j][0] < twilight_morning[i] and twilight_evening[i] < obs_windows[j][1]:
                    # Add window with either twilight times or window edges as
                    # boundaries (whichever are innermost).
                    new_windows.append([max([twilight_evening[i], obs_windows[j][0]]),
                                        min([twilight_morning[i], obs_windows[j][1]])])
                    if verbose:
                        print('\tadded:', max([twilight_evening[i], obs_windows[j][0]]).iso,
                              min([twilight_morning[i], obs_windows[j][1]]).iso)
        if verbose:
            print('new_windows:')
            [print('\t', new_window[0].iso, new_window[1].iso) for new_window in new_windows]
        # No overlap at all collapses to None rather than an empty list.
        if len(new_windows) == 0:
            return None
        else:
            return new_windows
    else:
        return None
def binary_search_find_root(number, min_v, max_v, accuracy, n):
    """
    Use to find nth dimension root of number using binary search.
    Precondition: max > min. Precondition: number < max

    :param number: Number to find square root of.
    :param min_v: lower bound, start at 0.
    :param max_v: upper bound, max value of number.
    :param accuracy: accuracy to round to for decimal points.
    :param n: nth dimension root to find (square = 2, cube = 3, 4, 5 .. n)
    :return: the midpoint whose nth power rounds to *number*, or -1 when
        the bounds have crossed (root not found).
        NOTE(review): recursion relies on round(mid_val ** n, accuracy)
        eventually matching *number*; for an accuracy too fine for float
        precision this may recurse deeply -- confirm callers pass a
        reachable accuracy.
    """
    if max_v <= min_v:
        # Can't find
        return -1
    mid_val = (max_v + min_v) / 2  # Returns a float.
    if round(mid_val ** n, accuracy) == number:
        return mid_val
    elif mid_val ** n > number:
        # Need to make mid_val**2 less so it matches number.
        # Look at values between min and mid, discard > mid point.
        return binary_search_find_root(number, min_v, mid_val, accuracy, n)
    elif mid_val ** n < number:
        return binary_search_find_root(number, mid_val, max_v, accuracy, n)
def cartesian_product(sequences):
    """Return the cartesian product of the tuples in the list *sequences*.

    Result ordering matches the recursive construction: the tail product
    varies slowest, the head sequence fastest.
    """
    # Base case: the product of zero sequences is the singleton empty tuple.
    if len(sequences) == 0:
        return [()]
    tails = cartesian_product(sequences[1:])
    return [(head,) + tail for tail in tails for head in sequences[0]]
def translate(point, translation):
    """Translate a 2D point by the given (tx, ty) offset; returns a list."""
    return [point[0] + translation[0], point[1] + translation[1]]
def find_field(item_list, cond, comparator, target_field):
    """Find the value of a field in the first dict that satisfies a condition.

    Args:
        item_list: A list of dict objects.
        cond: A param that defines the condition.
        comparator: A function that checks if a dict satisfies the condition.
        target_field: Name of the field whose value to return when an item
            satisfies the condition.

    Returns:
        Target value, or None if no item satisfies the condition (items that
        match but lack the field are skipped).
    """
    for entry in item_list:
        if not comparator(entry, cond):
            continue
        if target_field in entry:
            return entry[target_field]
    return None
def _append_spc_date_to_storm_ids(primary_id_strings, spc_date_string):
    """Append the SPC date to each storm ID.

    N = number of storm objects

    :param primary_id_strings: length-N list of primary IDs.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :return: Same as input but with "<id>-<date>" IDs.
    """
    return [
        '{0:s}-{1:s}'.format(storm_id, spc_date_string)
        for storm_id in primary_id_strings
    ]
def vac_to_air_apogee(vac_wvs, a=0.0, b1=5.792105e-2, b2=1.67917e-3, c1=238.0185, c2=57.362):
    """Convert vacuum wavelengths in angstroms to air wavelengths.

    The default constants are those suggested by APOGEE. Although the
    coefficients are nominally for wavelengths in microns, comparison plots
    suggest angstrom input is what was intended, so the input is used
    directly (no unit conversion).
    """
    inv_sq = vac_wvs ** -2
    refractive_index = 1.0 + a + b1 / (c1 - inv_sq) + b2 / (c2 - inv_sq)
    return vac_wvs / refractive_index
def bboxes_to_pixels(bbox, im_width, im_height):
    """Convert fractional bounding-box coordinates to pixels.

    (Bboxes are commonly parametrized as a percentage of the image size
    instead of pixels.)

    Args:
        bbox (tuple): (xmin, xmax, ymin, ymax) as fractions
        im_width (int): image width in pixels
        im_height (int): image height in pixels

    Returns:
        tuple: (xmin, xmax, ymin, ymax) in pixels
    """
    x_lo, x_hi, y_lo, y_hi = bbox
    return x_lo * im_width, x_hi * im_width, y_lo * im_height, y_hi * im_height
def parse_object(response, infotype):
    """Parse the result of an OBJECT command: numeric subcommands
    ('idletime', 'refcount') are converted to int, others pass through.
    """
    if infotype in ('idletime', 'refcount'):
        response = int(response)
    return response
def _parse_chap_header(headers):
    """Parse the X-CHAP header.

    CHAP headers are encoded like:
        X-CHAP:request:negz
        X-CHAP:challenge:butts
        X-CHAP:response:moo
        X-CHAP:token:zomgauthentication
    Each HTTP request or response should have a single X-CHAP header.

    Args:
        headers: A case insensitive dictionary of HTTP headers.

    Returns:
        A tuple like (chap_header_key, chap_header_value), both stripped.
    """
    raw = headers['X-CHAP'].split(':', 1)
    return tuple(part.strip() for part in raw)
def validate_token(token):
    """Validate that *token* follows the 'Bearer <jwt>' spec.

    :param token: authorization header value (may be None/empty)
    :return: the JWT itself, or None when *token* is falsy
    :raises Exception: when the value is not a well-formed 'Bearer <jwt>'
        (the original docstring claimed None was returned for invalid
        tokens, but the code has always raised)
    :rtype: str
    """
    if not token:
        return None
    parts = token.split(' ')
    # Must be exactly "Bearer <token>".
    if len(parts) != 2 or parts[0] != 'Bearer':
        raise Exception("Authorization value must be of the form 'Bearer <token>'")
    # A JWT has exactly three dot-separated segments: header.payload.signature.
    if len(parts[1].split('.')) != 3:
        raise Exception("JWT must contain exactly three '.'-separated segments")
    return parts[1]
def get_regex_first_group(regex):
    """Return group(1) of a match object, or None for a falsy (no-match) input.

    >>> get_regex_first_group(re.match('In order to (.+)', 'In order to avoid duplication'))
    'avoid duplication'
    """
    return regex.group(1) if regex else None
def parse_text_rules(text_lines):
    """Read text lines and build a list of dependency rules for comparison.

    The list preserves text order; blank lines are skipped. Each rule is:
    {'target': <target name>, 'dependency': <set of dependent filenames>}
    """
    rules = []
    for line in text_lines:
        if not line.strip():
            continue
        pieces = line.split(': ')
        rules.append({
            'target': pieces[0].strip(),
            'dependency': set(pieces[-1].strip().split(' ')),
        })
    return rules
def _safe_list_get(lst: list, index: int, default):
    """Return lst[index], or *default* when the index is out of bounds
    (negative indexing supported).
    """
    if -len(lst) <= index < len(lst):
        return lst[index]
    return default
def uniform_forward(X, shift=20):
    """Transform a power law distribution with k=2 into a uniform distribution."""
    denominator = X + 1 + shift
    return X / denominator
def underscore_to_dash(name: str) -> str:
    """Convert underscores to dashes and drop a single trailing dash, if any.

    Bug fix: the original indexed converted[-1], which raised IndexError
    for the empty string; endswith handles '' safely.
    """
    converted = name.replace('_', '-')
    if converted.endswith('-'):
        return converted[:-1]
    return converted
def find_greatest_sum_of_sub_array(lst: list) -> int:
    """Return the maximum sum over all contiguous sub-arrays of *lst*.

    Parameters
    -----------
    lst : list of numbers.

    Returns
    ---------
    The greatest contiguous-sub-array sum; 0 for an empty list.

    Notes
    ------
    Kadane's algorithm: a running sum is reset whenever it drops to <= 0.
    """
    if not lst:
        return 0
    best = lst[0]
    running = 0
    for value in lst:
        running = value if running <= 0 else running + value
        if running > best:
            best = running
    return best
def egcd(a, b):
    """Extended euclidean algorithm, iterative version.

    https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm

    Returns (g, x, y) where g == gcd(a, b) and g == x*a + y*b.
    """
    x, y = 0, 1
    u, v = 1, 0
    while a != 0:
        q = b // a
        r = b % a
        m = x - u * q
        n = y - v * q
        b, a = a, r
        x, y = u, v
        u, v = m, n
    return b, x, y
def largest_numbering(n1, n2):
    """
    function name: largest_numbering
    parameters: int, float
    return: the larger of the two inputs (n2 on ties, matching the
        original conditional-expression behaviour)
    """
    if n2 >= n1:
        return n2
    return n1
def index_modifier(index, modifier, index_mod=0):
    """Scale an index by a sub-division modifier, after optionally
    shifting the current index by *index_mod*.
    """
    shifted = index + index_mod
    return shifted + shifted * modifier
def lengthOfLongestSubstring(s):
    """Length of the longest substring of *s* without repeating characters.

    Sliding window over the last-seen index of each character: O(n) time
    instead of the original's per-character list scans, and no shadowing
    of the builtin `list`.

    :type s: str
    :rtype: int
    """
    last_seen = {}  # char -> most recent index
    best = 0
    window_start = 0
    for idx, ch in enumerate(s):
        # A repeat inside the current window moves the window past it.
        if ch in last_seen and last_seen[ch] >= window_start:
            window_start = last_seen[ch] + 1
        last_seen[ch] = idx
        best = max(best, idx - window_start + 1)
    return best
def mock_status_987(url, request):
    """Mock a status lookup for a purge request with an id of 987."""
    payload = {
        "originalEstimatedSeconds": 420,
        "progressUri": "/ccu/v2/purges/987",
        "originalQueueLength": 0,
        "purgeId": "987",
        "supportId": "987654321",
        "httpStatus": 200,
        "completionTime": None,
        "submittedBy": "myself",
        "purgeStatus": "In-Progress",
        "submissionTime": "2014-05-09T20:50:36Z",
        "pingAfterSeconds": 60,
    }
    return {
        'status_code': 200,
        'content-type': 'application/json',
        'server': 'Apache',
        'content': payload,
    }
def convert_size(num, unit):
    """Convert a typed data size (num, unit) into bytes.

    *unit* must be one of "bytes", "KB", "MB", "GB"; unknown units raise
    ValueError from list.index.
    """
    exponent = ["bytes", "KB", "MB", "GB"].index(unit)
    return int(num) * 1024 ** exponent
def get_expected_first_bin(begin, freq):
    """Get the first bin of a given frequency based on the begin ts of a
    timeseries query.

    Bin count math rounds down to the last bin but timerange queries
    return the next bin: given a 30 second bin, a begin timestamp of 15
    seconds past the minute yields a bin calc on the minute, but the time
    range query returns 30 seconds past the minute as the first result.
    A begin timestamp falling directly on a bin returns that bin.
    """
    # Bug fix: this code assumed Python 2 integer division. Under Python 3
    # `/` is true division, so (begin/freq)*freq == begin always and the
    # rounding-up branch never fired. Floor division restores the intent.
    bin = (begin // freq) * freq
    if bin < begin:
        return bin + freq
    elif bin == begin:
        return bin
    else:
        # Shouldn't happen
        raise RuntimeError
def organize_address(row):
    """Organize a row of dictionary data into the yaml-file address format.

    Address Dict
        {'address_lines': ['Room 443'],
         'city': 'Arlington',
         'state': 'VA',
         'street': '2300 Clarendon Boulevard',
         'zip': '22201'}

    The stripped street/city/state/zip group is only filled in when all
    four source fields are present.
    """
    address = {}
    street = row['Street Address']
    if street:
        address['street'] = street
    if row['Room Number']:
        address['address_lines'] = [row['Room Number']]
    if street and row['City'] and row['State'] and row['Zip Code']:
        address['street'] = street.strip()
        address['city'] = row['City'].strip()
        address['state'] = row['State'].strip()
        # Zip codes read from spreadsheets arrive as floats ("22201.0").
        address['zip'] = str(row['Zip Code']).replace('.0', '')
    return address
def make_list(value):
    """Wrap *value* in a list unless it already is one.

    This matters because list(value) on a dictionary would return its keys
    rather than the dictionary as a single element, i.e.
        x = {"first": 1, "second": 2}
        list(x) == ["first", "second"]
    whereas
        make_list(x) == [{"first": 1, "second": 2}]

    :param value: any object
    :return: *value* itself if it is a list, else [value]
    """
    return value if isinstance(value, list) else [value]
def this_is_a_keyword(arg1):
    """This is the keyword's documentation: print the argument and return
    the fixed string 'Whatever'.
    """
    print(arg1)
    return 'Whatever'
def Bold(text):
    """Wrap *text* in ANSI escape codes so terminals render it bold."""
    start, reset = '\033[1m', '\033[0m'
    return start + text + reset
def listrange2dict(L):
    """task 0.6.4: map each list position to its element.

    e.g. L = ['A', 'B', 'C']  ->  {0: 'A', 1: 'B', 2: 'C'}
    """
    return dict(enumerate(L))
def convertToCPM(dose, period):
    """Convert from milliSieverts/hr to CPM.

    Parameters:
        dose (double): The dosage
        period (double): The time period over which the dosage is administered

    Returns the measurement in CPM.
    """
    CONVERSION_FACTOR = 350000.0
    return CONVERSION_FACTOR * dose / period
def chi2(observed, expected):
    """
    Return the chi2 sum of the provided observed and expected values.

    NOTE(review): each term divides by _e ** 2, whereas the standard
    Pearson chi-square statistic divides by _e -- confirm whether the
    extra power is intentional (e.g. expected values doubling as errors).

    :param observed: list of floats.
    :param expected: list of floats.
    :return: chi2 (float); 0.0 when any expected value is zero (division guard).
    """
    if 0 in expected:
        return 0.0
    return sum((_o - _e) ** 2 / _e ** 2 for _o, _e in zip(observed, expected))
def clean_string(string):
    """Clean a string before creating a GraphQL query.

    `\\n`, `\\\\`, `\\r`, and double quotes cause issues, so newlines and
    quotes become spaces while backslashes and carriage returns are removed.

    :param string: Value that needs to be formatted (str()-converted first)
    :return: Formatted string compatible with a GraphQL query
    """
    text = str(string)
    for bad, good in (("\n", " "), ('"', " "), ("\\", ""), ("\r", "")):
        text = text.replace(bad, good)
    return text
def is_image_file(filename):
    """
    Check if file is an image

    :param filename: file name string
    :return: Boolean toggle
    """
    # str.endswith accepts a tuple of suffixes: one C-level call instead
    # of a generator expression inside any().
    return filename.endswith(('.bmp', '.png', '.jpg', '.jpeg', '.JPG', '.JPEG', '.PNG'))
def _energy_test_statistic_coefficient(n, m):
    """Coefficient of the energy test statistic: n*m / (n + m)."""
    pooled = n + m
    return n * m / pooled
def place(state, x, y, symbol):
    """In the current 3x3 *state*, player *symbol* marks cell (x, y).

    Returns a new, copied state tuple; asserts the cell was empty.
    """
    cells = list(state)
    pos = y * 3 + x
    assert cells[pos] is None
    cells[pos] = symbol
    return tuple(cells)
def validate_count(ctx, param, value):
    """Click-style callback: strip commas from *value* and return an int.

    :param ctx: click context (unused)
    :param param: click parameter (unused)
    :param value: string such as "1,000"
    :return: the integer value
    :raises ValueError: when the de-commaed string is not a valid integer
    """
    # The original `except ValueError: return int(value)` fallback could
    # never succeed: any string failing int() after comma removal also
    # fails int() with the commas present, so it re-raised the same error.
    return int(value.replace(",", ""))
def is_permutation(a: int, b: int) -> bool:
    """Return True iff the decimal digits of *a* are a permutation of *b*'s.

    Sorting the digit strings fully decides it (equal multisets of digits
    iff equal sorted sequences), so no separate set/length checks needed.
    """
    return sorted(str(a)) == sorted(str(b))
def parse_range(s):
    """Parse a string containing an integer or a range of integers.

    Accepts strings such as '123' or '1-20'; returns a list of 1 or 2 ints.
    """
    bounds = [int(piece) for piece in s.split('-')]
    if len(bounds) > 2:
        raise ValueError("Expected 'x-y' where x and y are integers. Got %s instead!" % s)
    return bounds
def init_crop_region(image_height, image_width):
    """Define the default crop region.

    Provides the initial crop region (pads the full image from both sides
    to make it a square image) when the algorithm cannot reliably determine
    the crop region from the previous frame.
    """
    if image_width > image_height:
        box_height, box_width = image_width / image_height, 1.0
        y_min = (image_height / 2 - image_width / 2) / image_height
        x_min = 0.0
    else:
        box_height, box_width = 1.0, image_height / image_width
        y_min = 0.0
        x_min = (image_width / 2 - image_height / 2) / image_width
    return {
        'y_min': y_min,
        'x_min': x_min,
        'y_max': y_min + box_height,
        'x_max': x_min + box_width,
        'height': box_height,
        'width': box_width,
    }
def mapc2p(xc, yc):
    """Map computational polar coordinates to curvilinear physical ones.

    xc is the radius, yc the angle theta; returns (x, y).
    """
    import numpy as np
    radius, theta = xc, yc
    return radius * np.cos(theta), radius * np.sin(theta)
def measure_diff(fakes, preds):
    """Measure the difference between ground truth and prediction.

    fakes (float array): generated "true" global scores
    preds (list list): list of [video_id: int, criteria_name: str,
        score: float, uncertainty: float] in the same order

    Returns:
        (float): 100 times mean squared distance between ground truth
        (rounded to 2 decimals) and predicted score
    """
    total = 0.0
    for truth, prediction in zip(fakes, preds):
        total += 100 * abs(round(truth, 2) - prediction[2]) ** 2
    return total / len(preds)
def line_filter_and_transform(filter_re, transform_re, repl, lines):
    """Transform and filter lines.

    Lines matching filter_re are dropped. Remaining lines are transformed
    with transform_re/repl as a regular-expression replace; lines the
    pattern does not match are output unchanged.
    """
    result = []
    for line in lines:
        if filter_re.match(line):
            continue  # drop lines matching the filter
        result.append(transform_re.sub(repl, line))
    return result
def gen_key_i(i, kappa, K):
    """Create a key value equal to kappa except at index i, where
    key_(i mod n) = kappa_(i mod n) XOR K_(i mod n).

    Parameters:
        i -- integer in [0, n-1]
        kappa -- bit string
        K -- bit string

    Return:
        key -- list of bool
    """
    # Convert the bit strings into lists of booleans.
    kappa_bits = [bool(int(ch)) for ch in kappa]
    k_bits = [bool(int(ch)) for ch in K]
    key_i = list(kappa_bits)
    # XOR only at index i.
    key_i[i] = k_bits[i] ^ kappa_bits[i]
    return key_i
def split_indexed_tag(tag):
    """Lower-case the created tag and split it on '->' for the api."""
    return tag.lower().split("->")
def diff_response_time_dicts(latest, old):
    """Return the delta between two {response_time: request_count} dicts.

    Used together with the response_times cache to get the response times
    for the last X seconds, which in turn is used to calculate the current
    response time percentiles. Zero deltas are omitted.
    """
    delta = {}
    for bucket, count in latest.items():
        change = count - old.get(bucket, 0)
        if change:
            delta[bucket] = change
    return delta
def extract_returns(val, returns):
    """
    Recursive method that extract all the returns name available in the present queries
    :param val: the IIR sub value, already recursively rebuilt
    :param returns: the support set containing the return value, it should be empty on the first iteration
    :return: the same *returns* set, mutated in place with every non-'args' key found
    """
    # Non-dict leaves contribute nothing.
    if type(val) is not dict:
        return returns
    for k, v in val.items():
        if k == 'args':
            continue  # argument sub-trees are not return names
        if type(v) is dict:
            extract_returns(v, returns)
        elif type:  # NOTE(review): `type` is the builtin class and is always truthy, so this behaves like a plain `else` -- likely a typo (perhaps `elif v:` was intended); confirm before changing
            returns.add(k)
    return returns
def rgG_to_RGB(r, g, G):
    """Convert rg chromaticity to RGB.

    :param r: Red chromaticity
    :param g: Green chromaticity
    :param G: Green value
    :return: (R, G, B) tuple
    """
    red = r * G / g
    blue = (1 - r - g) * G / g
    return red, G, blue
def log(stop=False, **kwds):
    """Helper to ease putting watchpoints.

    Synopsis:
        - set a conditional breakpoint
        - write the condition as some_condition and log(some_variable=some_variable, ...)
        - debug the code
    The debugger won't stop at that breakpoint but will log the name and
    value of each keyword argument whenever the condition is true.

    Remark: passing stop=True makes it a real breakpoint.
    """
    for name in kwds:
        print("%s = %s" % (name, kwds[name]))
    return stop
def createLockers(num=999):
    """Create a dict of locker info.

    Locker keys are numerical strings zero-padded to the same number of
    digits as *num*; every locker starts with the value "open".

    :param int num:
    :return: The locker info dict
    :rtype: dict[str, str]
    """
    width = len(str(num))
    return {str(i).zfill(width): "open" for i in range(1, num + 1)}
def get_digit(value, arg):
    """
    Given a whole number, return the requested digit of it, where 1 is the
    right-most digit, 2 is the second-right-most digit, etc. Returns the
    original value for invalid input (non-integer input/argument, or
    argument less than 1); returns 0 when the digit position exceeds the
    number's length. Otherwise, output is always an integer.
    """
    try:
        arg, value = int(arg), int(value)
    except ValueError:
        return value  # Fail silently for an invalid argument
    if arg < 1:
        return value
    digits = str(value)
    if arg > len(digits):
        return 0
    return int(digits[-arg])
def sales_record(record: list, which: tuple) -> list:
    """Add one sale to the specified salesperson and product number.

    Return the (in-place updated) sale record.
    -----------------------
    Parameters:
    - record: list
        Two-dimensional array that contains the record of each
        salesperson's sales (rows are products, columns are salespeople).
    - which: tuple
        The first element specifies which product (row), the second
        which salesperson (col); both are 1-based.
    """
    product_idx = which[0] - 1
    person_idx = which[1] - 1
    record[product_idx][person_idx] += 1
    return record
def _convert_title(title: str) -> str:
    """Convert a page title into a usable filename.

    Strips a leading 'Download ' prefix when present (the original sliced
    the first 9 characters unconditionally, mangling titles without the
    prefix) and removes characters invalid in filenames.
    """
    blacklist = {'\\', '/', ':', '*', '?', '"', '<', '>', '|', '\0'}
    download_prefix = 'Download '
    if title.startswith(download_prefix):
        title = title[len(download_prefix):]
    return "".join(c for c in title if c not in blacklist)
def convert_user_dict_format(config_user_dict):
    """
    Normalize a user-supplied HTML-form dictionary into valid config values:
    'on'/'off' flags become True/False, and numeric string values become
    Python floats. Other values are left untouched.

    :param config_user_dict: dict of form field name -> submitted value
    :return: the same dict, mutated in place
    """
    for key, value in config_user_dict.items():
        if value == "on":
            config_user_dict[key] = True
        elif value == "off":
            config_user_dict[key] = False
        # Bug fix: the original tested `value.strip != ""` (missing call
        # parens), comparing the bound method itself -- always true. Call
        # strip() and guard with isinstance so non-string values pass
        # through instead of raising AttributeError.
        elif isinstance(value, str) and value.strip() != "":
            try:
                config_user_dict[key] = float(value)
            except ValueError:
                pass
    return config_user_dict
def prod(xs):
    """Compute the product of the elements in an iterable.

    Returns 1 for an empty iterable.

    Args:
        xs: Iterable containing numbers.

    Returns:
        Product along iterable.
    """
    result = 1
    for factor in xs:
        result = result * factor
    return result
def get_value(dct: dict, *keys):
    """Walk *dct* through the given keys, substituting {} for any missing key."""
    node = dct
    for key in keys:
        node = node.get(key, {})
    return node
def rsieve(n):
    """
    Recursive Sieve of Eratosthenes: return the set of primes <= n.

    Sets are used so all the multiples can be grouped together and removed
    from the `numbers` set at once (not necessarily faster than the
    iterative version).
    """
    numbers = set(range(2, n + 1))

    def _sieve(k):
        # Guard for n < 4, where int(n ** 0.5) < 2: there are no composites
        # to remove. The original recursed on k - 1 without a lower bound,
        # so it never hit the k == 2 base case and raised RecursionError.
        if k < 2:
            return set()
        multiples = {k * i for i in range(2, n + 1) if k * i <= n}
        if k == 2:
            return multiples
        return multiples.union(_sieve(k - 1))

    # Notice the sqrt(n) bound rather than n -- the same check done for the
    # iterative version.
    return numbers.difference(_sieve(int(n ** 0.5)))
def message_level_number(message_level):
    """Map a message level tag to its numeric severity (0 for unknown tags)."""
    levels = {
        "error(parsing)": 1,
        "error(2)": 2,
        "error(mac)": 2,
        "error(3)": 3,
    }
    return levels.get(message_level, 0)
def distance_rectangle_point(rect, point):
    """
    Return the distance (fast) from a rectangle ``(x, y, width, height)``
    to a ``point`` (Manhattan distance; 0 when the point is inside).

    >>> distance_rectangle_point(Rectangle(0, 0, 10, 10), (11, -1))
    2
    >>> distance_rectangle_point((0, 0, 10, 10), (11, -1))
    2
    >>> distance_rectangle_point((0, 0, 10, 10), (-1, 11))
    2
    """
    px, py = point
    rx, ry, rw, rh = tuple(rect)
    # Per-axis distance outside the rectangle span; 0 when within the span.
    dx = max(rx - px, 0, px - (rx + rw))
    dy = max(ry - py, 0, py - (ry + rh))
    return abs(dx) + abs(dy)
def get_starting_magnetization_pw(pw_output):
    """
    From the output of a PW calculation, get the atomic magnetic moment per
    unit charge and build the corresponding restart magnetization to be
    applied to a subsequent calculation.

    :param pw_output: dictionary with the output of a PW calc.
    :return: a dictionary of the form:
        {'starting_magnetization': {specie_name_a: starting mag. for a,
                                    specie_name_b: starting mag. for b}
         'angle1' (optional, for SOC calc.): {specie_name_a: angle1 for a,
                                              specie_name_b: angle1 for b}
         'angle2' (optional, for SOC calc.): {specie_name_a: angle2 for a,
                                              specie_name_b: angle2 for b}
        }
        or {} when the calculation produced no atomic magnetic moments.
    """
    import numpy as np
    if 'atomic_magnetic_moments' not in pw_output:
        return {}
    # Per-atom magnetic moment normalised by the atomic charge.
    mag_moments = (np.array(pw_output['atomic_magnetic_moments'])/
                   np.array(pw_output['atomic_charges'])).tolist()
    species_name = pw_output['atomic_species_name']
    # Average the per-atom moments over each species, rounded to 3 decimals.
    start_mag = dict([(kind_name,round(np.average([mom
        for k,mom in zip(species_name,mag_moments) if k==kind_name]),3))
        for kind_name in set(species_name)])
    result_dict = {'starting_magnetization': start_mag}
    if ('atomic_magnetic_theta' in pw_output and 'atomic_magnetic_phi' in pw_output):
        # Spin-orbit-coupling run: also average the spherical angles per species.
        theta = pw_output['atomic_magnetic_theta']
        phi = pw_output['atomic_magnetic_phi']
        result_dict['angle1'] = dict([(kind_name,round(np.average([th
            for k,th in zip(species_name,theta) if k==kind_name]),3))
            for kind_name in set(species_name)])
        result_dict['angle2'] = dict([(kind_name,round(np.average([ph
            for k,ph in zip(species_name,phi) if k==kind_name]),3))
            for kind_name in set(species_name)])
    return result_dict
def set_logger(new_logger):
    """Install *new_logger* as pypd's module-global logger and return it.

    Assumes a logging.Logger interface.
    """
    global logger
    logger = new_logger
    return logger
def ConstructGoldenEyeBuildDetailsURL(build_id):
    """Return the dashboard (goldeneye) URL for this run.

    Args:
        build_id: CIDB id for the build.

    Returns:
        The fully formed URL.
    """
    template = ('http://go/goldeneye/'
                'chromeos/healthmonitoring/buildDetails?id=%(build_id)s')
    return template % {'build_id': build_id}