content
stringlengths
42
6.51k
def clean_up_interface_vlan_configs(proposed, existing):
    """Build the commands that strip stale VLAN config from an interface.

    When an interface is being switched to access mode, the trunk-related
    settings are removed; when it is being switched to trunk mode, the
    access VLAN setting is removed.

    Args:
        proposed (dict): proposed configuration params & values
        existing (dict): existing as-is configuration params & values
        (both supplied by the Ansible module)

    Returns:
        list: ordered list of commands to send to the device; the
        ``interface <name>`` line is prepended only when there is work to do.

    Note:
        Specific to the Ansible module(s). Not to be called otherwise.
    """
    cmds = []
    mode = proposed['mode']
    if mode == 'access':
        # Drop any trunk leftovers that differ from the device defaults.
        if existing['native_vlan'] != '1':
            cmds.append('no switchport trunk native vlan')
        if existing['trunk_vlans'] != '1-4094':
            cmds.append('no switchport trunk allowed vlan')
        if existing['mode'] == 'trunk':
            cmds.append('no switchport mode trunk')
    elif mode == 'trunk' and existing['mode'] == 'access':
        cmds.append('no switchport access vlan')
    if cmds:
        cmds.insert(0, 'interface ' + proposed['interface'])
    return cmds
def coord_to_1d_index(x, y, width):
    """Return the flat (row-major) array index for an x/y coordinate.

    `width` is the map width, i.e. the number of columns per row.
    """
    row_offset = y * width
    return row_offset + x
def ways_have_matching_objects(way1, way2):
    """Return True when the two ways end at the same object in their paths."""
    last1 = way1[-1]
    last2 = way2[-1]
    return last1 == last2
def text2int(textnum, numwords={}):
    """Convert an English number phrase to an integer.

    Args:
        textnum (str): number written in English, e.g. "one hundred and
            twenty three".

    Keyword Args:
        numwords (dict): word -> (scale, increment) lookup table. The
            mutable default is deliberate: the table is built on the first
            call and cached across calls.

    Returns:
        int: the integer value of the phrase.

    Raises:
        ValueError: for a word not in the lookup table. (Previously a bare
            ``Exception``; ValueError is a subclass, so existing handlers
            still match.)
    """
    if not numwords:
        units = ["zero", "one", "two", "three", "four", "five", "six",
                 "seven", "eight", "nine", "ten", "eleven", "twelve",
                 "thirteen", "fourteen", "fifteen", "sixteen", "seventeen",
                 "eighteen", "nineteen"]
        tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty",
                "seventy", "eighty", "ninety"]
        scales = ["hundred", "thousand", "million", "billion", "trillion"]
        numwords["and"] = (1, 0)
        for idx, word in enumerate(units):
            numwords[word] = (1, idx)
        for idx, word in enumerate(tens):
            numwords[word] = (1, idx * 10)
        for idx, word in enumerate(scales):
            # "hundred" -> 10**2; then 10**3, 10**6, ... for the rest.
            numwords[word] = (10 ** (idx * 3 or 2), 0)
    current = result = 0
    for word in textnum.split():
        if word not in numwords:
            raise ValueError("Illegal word: " + word)
        scale, increment = numwords[word]
        current = current * scale + increment
        # A scale above 100 (thousand, million, ...) closes the current group.
        if scale > 100:
            result += current
            current = 0
    return result + current
def get_token(s):
    """Split the next token off the front of ``s``.

    Returns a (token, remainder) pair. Operators and the ``cos(`` / ``sin(``
    prefixes are recognised first; otherwise the leading run of alphanumeric
    characters is the token.

    Known limitation (as before): a unary minus is returned as a bare
    ``-`` operator token.
    """
    s = s.strip()
    operators = ("+", "-", "*", "/", "(", ")", "cos(", "sin(")
    for op in operators:
        if s.startswith(op):
            return s[:len(op)], s[len(op):]
    # Find the first non-alphanumeric character; None means the whole
    # string is one token (also covers the empty string).
    cut = next((i for i, ch in enumerate(s) if not ch.isalnum()), None)
    if cut is None:
        return s, ""
    return s[:cut], s[cut:]
def get_key_lists_index(obj: object, dct: dict, idx: int = 0) -> object:
    """Return the key whose list holds ``obj`` at position ``idx``.

    ``dct`` maps keys to lists, e.g. {"a": list(), "b": list(), ...}.
    Returns None when no list matches.
    """
    matches = (key for key, values in dct.items() if values[idx] == obj)
    return next(matches, None)
def are_same_endpoint(first, second):
    """Equivalence check of two URLs, ignoring 'www.' and query parameters."""
    def canonical(url):
        without_www = url.replace('www.', '')
        return without_www.split('?')[0]

    return canonical(first) == canonical(second)
def get_res_unique_name(resource):
    """Return a unique name for a resource such as a pod or CRD.

    The name is ``<namespace>/<name>`` for namespaced resources and just
    ``<name>`` for cluster-scoped ones (no namespace in metadata).

    :returns: String with <namespace/>name of the resource
    """
    metadata = resource['metadata']
    try:
        return "{namespace}/{name}".format(**metadata)
    except KeyError:
        # Cluster-scoped: no namespace key present.
        return "{name}".format(**metadata)
def get_series_ids(panel_info):
    """Join all series ref ids of a panel into one underscore-separated string.

    :param panel_info: information about a panel (must contain 'targets')
    :return: string of panel refId values separated by underscores
    """
    return '_'.join(target['refId'] for target in panel_info['targets'])
def rd(a, b, p):
    """One step of the restoring division algorithm.

    Interprets ``a`` and ``b`` as binary strings and returns
    ``bin(a - b * 2**p)``.
    """
    dividend = int(str(a), 2)
    shifted_divisor = int(str(b), 2) * 2 ** p
    return bin(dividend - shifted_divisor)
def robot_move(row, col, k):
    """Count the cells a robot can reach moving right/down from (0, 0).

    A cell (i, j) may be entered only when the sum of the decimal digits of
    i plus the digit sum of j does not exceed ``k``.

    :param row: number of rows, m
    :param col: number of columns, n
    :param k: digit-sum limit
    :return: number of reachable cells

    Fixes:
    - BFS now uses ``collections.deque`` with a visited set instead of the
      O(n) ``list.pop(0)`` / level-set shuffle.
    - ``k < 0`` now returns 0: the start cell itself (digit sum 0) violates
      the limit, so nothing is reachable (previously returned 1).
    """
    from collections import deque

    def digit_sum(num):
        """Sum of the decimal digits of ``num``."""
        total = 0
        while num:
            total += num % 10
            num //= 10
        return total

    if row < 1 or col < 1:
        raise Exception('Invalid Matrix')
    if k < 0:
        return 0

    queue = deque([(0, 0)])
    seen = {(0, 0)}
    count = 0
    while queue:
        i, j = queue.popleft()
        count += 1
        # Only right/down moves are needed: any reachable cell can be
        # reached using them alone.
        for ni, nj in ((i, j + 1), (i + 1, j)):
            if (ni < row and nj < col and (ni, nj) not in seen
                    and digit_sum(ni) + digit_sum(nj) <= k):
                seen.add((ni, nj))
                queue.append((ni, nj))
    return count
def format_with_keywords(**kwvars: str):
    """Demo of ``str.format`` with keyword arguments.

    Prints the received keyword mapping, then substitutes the values for
    ``item1`` / ``item2`` / ``item3`` into a template string.

    Examples:
        >>> format_with_keywords(item1='a', item2='b', item3='c')
        {'item1': 'a', 'item2': 'b', 'item3': 'c'}
        'blah a blah b blah c'
    """
    print(dict(kwvars))
    template = "blah {item1} blah {item2} blah {item3}"
    return template.format(**kwvars)
def transpose(data):
    """Transpose a 2-dimensional list; rows become tuples of columns."""
    columns = zip(*data)
    return list(columns)
def booly(arg):
    """Convert a boolean-like string to a bool.

    arg: str representing something boolean-like
    return: boolean representation of `arg`
    raises: ValueError when `arg` is not recognised
    """
    normalized = arg.lower()
    truthy = {'yes', 'y', 'true', 't', '1'}
    falsy = {'no', 'n', 'false', 'f', '0'}
    if normalized in truthy:
        return True
    if normalized in falsy:
        return False
    raise ValueError('Could not convert {!r} to boolean'.format(arg))
def call_user_func(settings_module, func_name, *args, **kwargs):
    """Call a user-supplied settings function and clean it up afterwards.

    ``settings_module`` may be None/falsy, or the function may not exist;
    in either case None is returned. When the function exists it is called
    with *args and **kwargs, the attribute is deleted (even if the call
    raises), and the result is returned.
    """
    if settings_module and hasattr(settings_module, func_name):
        func = getattr(settings_module, func_name)
        try:
            return func(*args, **kwargs)
        finally:
            # Remove the user function so it cannot be called twice.
            delattr(settings_module, func_name)
def get_payer_information(json_list_of_transactions):
    """Extract one record per unique payer from a list of Venmo transactions.

    Each record carries the actor fields listed below plus an
    ``about_personalised`` flag (0 when the "about" text is one of the known
    default values) and ``user_id`` (renamed from ``id``).

    Returns:
        tuple: (list of payer dicts, set of unique payer ids)
    """
    # Columns to carry over; first/last name are skipped because they are
    # already present in display_name.
    keys = (["username", "is_active", "display_name", "is_blocked", "about",
             "profile_picture_url", "id", "date_joined", "is_group"])
    # Default "about" values observed while exploring the data in
    # eda_venmo.ipynb.
    about_default = ([' ', 'No Short Bio', 'No short bio', '\n', ' \n',
                      ' ', 'No Short Bio\n'])
    payers = []
    payer_ids = set()  # set: only unique payer ids are wanted
    for transaction in json_list_of_transactions:
        # Skip a known bad transaction record.
        if transaction['id'] == '2541220786958500195':
            continue
        actor = transaction['payment']['actor']
        actor_id = actor['id']
        if actor_id in payer_ids:
            continue
        payer_ids.add(actor_id)
        payer = {}
        for key in keys:
            payer[key] = actor.get(key)
            if key == 'about':
                # Flag whether the about text was personalised by the user.
                payer['about_personalised'] = (
                    0 if payer['about'] in about_default else 1)
        payer['user_id'] = payer.pop('id')
        payers.append(payer)
    # Note: some users lack about, date_joined or username; they have still
    # made transactions, so they are kept.
    return payers, payer_ids
def conflictsWithOtherArc(arc, arcs):
    """Check whether an arc is in conflict with any existing arc.

    Five types of relationship:
        1. overlap, always illegal
        2. congruence, always illegal
        3. containment, requires nesting test
        4. co-initiation, requires nesting test
        5. co-termination, requires nesting test

    Fix: the final branch tested ``cond5a`` a second time (unreachable
    copy-paste of the earlier branch); by symmetry with cond3b/cond4b it
    must test ``cond5b`` (new arc co-terminated with an existing arc and
    starting later), so that nesting case is now actually checked.
    """
    conflict = False
    for extArc in arcs:
        # new arc overlaps later arc
        cond1a = (arc[0] < extArc[0] < arc[-1] < extArc[-1])
        # new arc overlaps earlier arc
        cond1b = (extArc[0] < arc[0] < extArc[-1] < arc[-1])
        # new arc is congruent with existing arc
        cond2 = (extArc[0] == arc[0] and extArc[-1] == arc[-1])
        # new arc contains an existing arc
        cond3a = (arc[0] < extArc[0] and arc[-1] > extArc[-1])
        # new arc is contained by an existing arc
        cond3b = (extArc[0] < arc[0] and extArc[-1] > arc[-1])
        # new arc is co-initiated with an existing arc and ends earlier
        cond4a = (extArc[0] == arc[0] and extArc[-1] > arc[-1])
        # new arc is co-initiated with an existing arc and ends later
        cond4b = (extArc[0] == arc[0] and extArc[-1] < arc[-1])
        # new arc is co-terminated with an existing arc and starts earlier
        cond5a = (extArc[0] > arc[0] and extArc[-1] == arc[-1])
        # new arc is co-terminated with an existing arc and starts later
        cond5b = (extArc[0] < arc[0] and extArc[-1] == arc[-1])
        if cond1a or cond1b or cond2:
            conflict = True
        # conflicts if internal elements of containing arc
        # found in contained arc
        elif cond3a:
            if len(arc) > 2:
                for d in arc[1:-1]:
                    if extArc[0] < d < extArc[-1]:
                        conflict = True
                        break
        elif cond4a:
            if len(extArc) > 2:
                for d in extArc[1:-1]:
                    if d < arc[-1]:
                        conflict = True
                        break
        elif cond5a:
            if len(arc) > 2:
                for d in arc[1:-1]:
                    if d > extArc[0]:
                        conflict = True
                        break
        elif cond3b:
            if len(extArc) > 2:
                for d in extArc[1:-1]:
                    if arc[0] < d < arc[-1]:
                        conflict = True
                        break
        elif cond4b:
            if len(arc) > 2:
                for d in arc[1:-1]:
                    if d < extArc[-1]:
                        conflict = True
                        break
        elif cond5b:  # was a second (unreachable) cond5a test
            if len(extArc) > 2:
                for d in extArc[1:-1]:
                    if d > arc[0]:
                        conflict = True
                        break
    return conflict
def snake_to_pascal_case(text):
    """Convert snake_case (e.g. column_title) to PascalCase (ColumnTitle)."""
    return ''.join(part.capitalize() for part in text.split('_'))
def std_array(comment, valueType, ndim, **kwargs):
    """Describe a standard array column with variable shape (smaller arrays).

    Extra keyword arguments are merged into the description; passing a key
    that duplicates one of the fixed entries raises TypeError, as before.
    """
    fixed = dict(dataManagerType='StandardStMan',
                 dataManagerGroup='StandardStMan',
                 _c_order=True,
                 option=0,
                 maxlen=0)
    return dict(comment=comment, valueType=valueType, ndim=ndim,
                **fixed, **kwargs)
def clean_clip_text(cliptext):
    """Drop characters above the Basic Multilingual Plane.

    TCL only accepts code points up to 65535, so anything beyond that
    range is removed.
    """
    kept = [ch for ch in cliptext if ord(ch) <= 65535]
    return "".join(kept)
def getCellByPath(rc, output, outerr, parseParamList, Logger):
    """Return the cell to which a file or directory belongs.

    NOTE(review): this is a stub — it always returns the empty string and
    none of the parameters are used yet.
    """
    return ""
def sign(x):
    """Sign of x, i.e. +1 or -1; returns 1 for x == 0."""
    return 1 if x >= 0 else -1
def name_circulant(num_vertices, j_value_set):
    """Return the string representation of a circulant graph."""
    return "Cir [{}] [{}]".format(num_vertices, j_value_set)
def get_columns_to_drop(year):
    """Return the column names to drop for the data of the given year.

    :param year: the year of the data (currently unused — the same columns
        are dropped for every year)
    :return: list of column names to drop; "us" is always dropped
    """
    return ["us", "YIB"]
def update_schema(schema, field_id, new_name=None):
    """Find the schema entry with ``schemaid == field_id`` and rename or
    delete it, recursing into nested 'list' entries.

    With a truthy ``new_name`` the entry is renamed (and flagged mapped)
    unless the name already exists at that level; otherwise the entry is
    removed. Returns ``field_id`` on success, None otherwise. Mutates
    ``schema`` in place.
    """
    sibling_names = []
    target = None
    for item in schema:
        if item['schemaid'] == field_id:
            target = item
        elif 'list' in item:
            nested = update_schema(item['list'], field_id, new_name)
            if nested:
                return nested
            sibling_names.append(item['field'])
        else:
            sibling_names.append(item['field'])
    if target is None:
        return None
    if not new_name:
        schema.remove(target)
        return field_id
    if new_name in sibling_names:
        # Renaming would collide with an existing field at this level.
        return None
    target['field'] = new_name
    target['mapped'] = True
    return field_id
def is_grey_boundary_colour(colour):
    """Return whether this colour could be mistaken for the grey boundary."""
    r, g, b = colour[0], colour[1], colour[2]
    return 223 <= r <= 225 and 224 <= g <= 226 and 226 <= b <= 228
def StripDoubleUnderscorePrefixes(text: str) -> str:
    """Remove the optional __ qualifiers on OpenCL keywords.

    The OpenCL spec allows a __ prefix for OpenCL keywords, e.g. '__global'
    and 'global' are equivalent. This preprocessor removes the '__' prefix
    on those keywords.

    Fix: the previous chain of ``str.replace`` calls also rewrote
    identifiers that merely *contained* a keyword (e.g. 'my__global' ->
    'myglobal'). A word-boundary regex restricts stripping to whole tokens.

    Args:
        text: The OpenCL source to preprocess.

    Returns:
        OpenCL source with __ stripped from OpenCL keywords.
    """
    import re
    # Keywords taken from the OpenCL 1.2 specification, page 169.
    keywords = ("const", "constant", "global", "kernel", "local", "private",
                "read_only", "read_write", "restrict", "write_only")
    pattern = re.compile(r"\b__({})\b".format("|".join(keywords)))
    return pattern.sub(r"\1", text)
def is_image(content_type):
    """Is the given Content-Type header value for an image?

    Args:
        content_type: Content-Type HTTP header value (any type; it is
            coerced to str before checking)

    Returns:
        bool: True when "image" occurs in the value.
    """
    return "image" in str(content_type)
def convertVCFGenotype(vcfGenotype):
    """Parse a VCF genotype string (e.g. '0/1' or '1|1') into a list of ints.

    Returns [-1] for a missing call (any '.' in the genotype) or None input.

    Fix: the Python-2-era ``map(int, ...)`` returned a lazy, single-use
    iterator under Python 3, inconsistent with the list returned by the
    other branches; the result is now always a concrete list.
    """
    if vcfGenotype is None:
        return [-1]
    # Genotypes are phased ('|') or unphased ('/').
    delim = "|" if "|" in vcfGenotype else "/"
    if "." in vcfGenotype:
        return [-1]
    return [int(allele) for allele in vcfGenotype.split(delim)]
def is_numeric(s: str) -> bool:
    """Check whether a string parses as an integer.

    :param s: The string to check.
    :return: True if the value is numeric.
    """
    try:
        int(s)
    except ValueError:
        return False
    return True
def trans_to_pt_list(interp_lane):
    """Split an interpolated lane into separate x and y lists.

    :param interp_lane: raw lane as a list of {'x': ..., 'y': ...} points
    :return: (x list, y list)
    """
    xs = [point['x'] for point in interp_lane]
    ys = [point['y'] for point in interp_lane]
    return xs, ys
def lldp_capabilities(b: bytes) -> dict:
    """Decode an LLDP capabilities TLV value.

    The first two bytes are the 'supported' bitmap and the next two are
    the 'enabled' bitmap (e.g. b'\\x00\\x04\\x00\\x04').

    Args:
        b: four bytes of capability bitmaps

    Returns:
        dict with 'supported' and 'enabled' lists of capability names.
    """
    names = ["Other", "Repeater", "Bridge", "WLAN access point", "Router",
             "Telephone", "DOCSIS cable device", "Station only"]
    result = {"supported": [], "enabled": []}
    for group in ("supported", "enabled"):
        word = (b[0] << 8) | b[1]
        result[group] = [names[bit] for bit in range(len(names))
                         if word & (1 << bit)]
        b = b[2:]  # advance to the next 16-bit bitmap
    return result
def drop_empty_rows(rows):
    """Return a new list of rows with all blank rows removed.

    autom8 considers a row blank if it is totally empty, or if every item
    is None, the empty string, or a whitespace-only string.

    >>> drop_empty_rows([])
    []
    >>> drop_empty_rows([[], [1, 2, 3], [], [4, 5, 6], []])
    [[1, 2, 3], [4, 5, 6]]
    >>> drop_empty_rows([[9, 8, 7], [' ', '\t', ''], [6, 5, 4], [None, ' ']])
    [[9, 8, 7], [6, 5, 4]]
    """
    def _has_content(cell):
        if cell is None:
            return False
        if isinstance(cell, str):
            return bool(cell) and not cell.isspace()
        return True

    return [row for row in rows if any(_has_content(cell) for cell in row)]
def sample_app(environ, start_response):
    """A sample WSGI application exercising the authenticate middleware.

    Returns a 401 status when ``/private`` is requested without a signed-in
    user (the middleware then forwards to ``/signin``). ``/signin`` shows a
    form and, on a quick-and-dirty username==password check, marks the user
    signed in via ``environ['paste.auth_tkt.set_user']``. ``/signout``
    reports whether a user was signed in (the middleware handles the actual
    sign-out when cookie_signout = '/signout' is configured). Every other
    path dumps the environment.

    Fix: ``dict.has_key()`` was removed in Python 3; membership is now
    tested with the ``in`` / ``not in`` operators.
    """
    if environ['PATH_INFO'] == '/private' and 'REMOTE_USER' not in environ:
        start_response('401 Not signed in', [])
    elif environ['PATH_INFO'] == '/signout':
        start_response('200 OK', [('Content-type', 'text/plain')])
        if 'REMOTE_USER' in environ:
            return ["Signed Out"]
        else:
            return ["Not signed in"]
    elif environ['PATH_INFO'] == '/signin':
        page = """
        <html>
        <body>
        %s
        <form action="/signin">
        Username: <input type="text" name="username" />
        Password: <input type="password" name="password" />
        <br />
        <input type="submit" value="Sign in" />
        </body>
        </html>
        """
        if not environ.get('QUERY_STRING'):
            start_response(
                '200 Sign in required',
                [('Content-type', 'text/html')]
            )
            return [page % '<p>Please Sign In</p>']
        else:
            # Quick and dirty sign in check, do it properly in your code
            params = {}
            for part in environ['QUERY_STRING'].split('&'):
                params[part.split("=")[0]] = part.split('=')[1]
            if params['username'] and params['username'] == params['password']:
                start_response('200 OK', [('Content-type', 'text/html')])
                environ['paste.auth_tkt.set_user'](params['username'])
                return ["Signed in."]
            else:
                start_response('200 OK', [('Content-type', 'text/html')])
                return [page % '<p>Invalid details</p>']
    # Fall-through (including after the 401 above): dump the environment.
    start_response('200 OK', [('Content-type', 'text/plain')])
    result = ['You Have Access To This Page.\n\nHere is the environment...\n\n']
    for k, v in environ.items():
        result.append('%s: %s\n' % (k, v))
    return result
def form_fastqc_cmd_list(fastqc_fp, fastq_fp, outdir):
    """Generate the argument list for a fastqc subprocess call.

    Args:
        fastqc_fp (str): path to the fastqc program
        fastq_fp (str): path to the fastq file to be evaluated
        outdir (str or None): path to the output directory

    Returns:
        list: the arguments for the fastqc subprocess call

    Raises:
        ValueError: when the fastqc path or the fastq input path is empty
            (prevents accidentally launching interactive fastqc).

    Fix: the empty-string checks used the identity operator (``is ''``),
    which is undefined behaviour for literals and a SyntaxWarning on
    modern Pythons; they now use equality.
    """
    if fastqc_fp == '':
        raise ValueError('fastqc_fp name is empty')
    if fastq_fp == '':
        raise ValueError('fastq_fp file name is empty')

    # required arguments
    call_args_list = [fastqc_fp, fastq_fp]

    # direct output
    if outdir is not None:
        call_args_list.extend(["--outdir", outdir])

    return call_args_list
def getMem(rec):
    """Return the record's memory in GBs as a float.

    Reads ``rec['product']['attributes']['memory']`` (e.g. "1,024 GiB"),
    takes the leading token and strips thousands separators.
    """
    attrs = rec["product"]["attributes"]
    amount = attrs.get("memory").split()[0]
    return float(amount.replace(",", ""))
def mask(string: str, start: int, end: int, replacement_char="#"):
    """Mask the [start, end) slice of a string with a replacement char."""
    masked = replacement_char * (end - start)
    return "".join([string[:start], masked, string[end:]])
def scalar_product(vector_1, vector_2):
    """Scalar (dot) product of two vectors.

    Args:
        vector_1 (list):
        vector_2 (list):

    Returns:
        (float): vector_1 · vector_2, or 0 when the lengths differ.
    """
    if len(vector_1) != len(vector_2):
        return 0
    return sum(a * b for a, b in zip(vector_1, vector_2))
def filter_cursor(cursor, key, vals, verbosity=0):
    """Filter documents by a numeric condition on ``key``.

    Documents missing the key, or whose value cannot be compared to a
    float, are silently dropped.

    Parameters:
        cursor (list): list of dictionaries to filter.
        key (str): key to filter.
        vals (list): one value (minimum) or two values (half-open range).
        verbosity (int): print progress when > 0.

    Returns:
        list: documents that pass the filter.
    """
    original_count = len(cursor)
    if not isinstance(vals, list):
        vals = [vals]
    if len(vals) == 2:
        lo, hi = float(vals[0]), float(vals[1])
        if verbosity > 0:
            print('Filtering {} <= {} < {}'.format(lo, key, hi))

        def keep(doc):
            # Half-open range: lo <= value < hi.
            return doc[key] < hi and doc[key] >= lo
    else:
        lo = float(vals[0])
        if verbosity > 0:
            print('Filtering {} >= {}'.format(key, lo))

        def keep(doc):
            return doc[key] >= lo

    kept = []
    for doc in cursor:
        try:
            if keep(doc):
                kept.append(doc)
        except (TypeError, ValueError, KeyError):
            # Missing key or incomparable value: drop the document.
            pass
    if verbosity > 0:
        print(original_count, 'filtered to', len(kept), 'documents.')
    return kept
def lin_AAIMON_slope_eq(a, x):
    """Linear slope equation with a fixed y-intercept of 1.0.

    Parameters
    ----------
    a : float
        Slope.
    x : float
        X-axis value.

    Returns
    -------
    y : float
        a * x + 1
    """
    return a * x + 1
def validate_int(value, label, low=None, high=None):
    """Validate that ``value`` represents a whole number and return it as int.

    Accepts the various integer representations (2, 2.0, ...) but rejects
    booleans: bools behave like 0/1 integers, and passing one to an API
    expecting an int is almost certainly a developer error. Non-whole or
    non-numeric values, and values outside [low, high], raise ValueError.
    """
    if value is None or isinstance(value, bool):
        raise ValueError('Invalid type for integer value: {0}.'.format(value))
    try:
        as_int = int(value)
    except TypeError:
        raise ValueError('Invalid type for integer value: {0}.'.format(value))
    # True for non-numeric strings like '2' and non-whole numbers like 2.5.
    if as_int != value:
        raise ValueError('{0} must be a numeric value and a whole number.'.format(label))
    if low is not None and as_int < low:
        raise ValueError('{0} must not be smaller than {1}.'.format(label, low))
    if high is not None and as_int > high:
        raise ValueError('{0} must not be larger than {1}.'.format(label, high))
    return as_int
def _serialize_config(config): """Turns a config into an integer treating each of the variables as spins. Examples: >>> _serialize_config((0, 0, 1)) 1 >>> _serialize_config((1, 1)) 3 >>> _serialize_config((1, 0, 0)) 4 """ out = 0 for bit in config: out = (out << 1) | (bit > 0) return out
def sub2ind(shape, subtpl):
    """Convert 2D subscripts into a 1D (row-major) index.

    @param shape: Tuple indicating size of 2D array.
    @param subtpl: Tuple of (possibly) numpy arrays of row,col values.
    @return: 1D array of indices.
    """
    if len(shape) != 2 or len(shape) != len(subtpl):
        raise IndexError("Input size and subscripts must have length 2 and "
                         "be equal in length")
    row, col = subtpl
    _, width = shape
    return width * row + col
def log_errors(f, self, *args, **kwargs):
    """Call ``f(self, *args, **kwargs)``, logging any unhandled exception.

    For use when wrapping on_recv callbacks, so that an exception does not
    cause the stream to be closed. Returns f's result, or None when an
    exception was logged.
    """
    try:
        return f(self, *args, **kwargs)
    except Exception:
        message = 'Uncaught exception in %r' % f
        self.log.error(message, exc_info=True)
def to_secs(x: str) -> int:
    """Convert a hh:mm:ss time string to a number of seconds."""
    hours, minutes, seconds = (int(part) for part in x.split(':'))
    return hours * 3600 + minutes * 60 + seconds
def filter_glyph_names(alist, filter):
    """Return the glyph names from `alist` that are NOT in `filter`.

    Fixes:
    - removed the bare ``except`` (which swallowed every exception,
      including KeyboardInterrupt) in favour of a direct membership test;
    - removed the unused ``count`` and ``filtered_index`` variables.

    The parameter name ``filter`` shadows the builtin but is kept for
    interface compatibility with existing callers.
    """
    return [name for name in alist if name not in filter]
def find_needle(haystack):
    """Return a message giving the position of 'needle' in haystack.

    Raises ValueError (from ``index``) when there is no needle.
    """
    position = haystack.index('needle')
    return 'found the needle at position {}'.format(position)
def find_string_indices(content, search):
    """List every index at which ``search`` occurs in ``content``.

    Overlapping occurrences are all reported.

    Args:
        content (str): string to search within.
        search (str): string to find occurrences of.

    Returns:
        list: the starting index of each occurrence.
    """
    return [idx for idx, _ in enumerate(content)
            if content.startswith(search, idx)]
def control_change_rate_cost(u, u_prev):
    """Penalty for control jerk: squared difference from the previous input."""
    delta = u - u_prev
    return delta ** 2
def add_prefix(info_string, prefix):
    """Prefix every line of info_string except the first with '<prefix>:'."""
    first, *rest = info_string.split("\n")
    prefixed = [first]
    prefixed.extend('{}:{}'.format(prefix, line) for line in rest)
    return "\n".join(prefixed)
def scaled_data(data, domain):
    """Scale data to [-1.0, 1.0], the domain of the Legendre polynomials.

    ``domain`` is a (lower, upper) pair describing the original range.
    """
    lower, upper = domain[0], domain[1]
    return (2. * data - lower - upper) / (upper - lower)
def _all_trybots_finished(try_builds): """Return True iff all of the given try jobs have finished. Args: try_builds: list of TryBuild instances. Returns: True if all of the given try jobs have finished, otherwise False. """ for try_build in try_builds: if not try_build.is_finished: return False return True
def ltruncate_int(value, ndigits):
    """Truncate an integer, retaining the least significant digits.

    Parameters
    ----------
    value : int
        value to truncate
    ndigits : int
        number of digits to keep (must be >= 1)

    Returns
    -------
    truncated : int
        only the `ndigits` least significant digits from `value`

    Raises
    ------
    ValueError
        when ndigits < 1. (Previously ``ndigits == 0`` silently returned
        the whole value, because ``str(value)[-0:]`` is the full string.)

    Examples
    --------
    >>> ltruncate_int(123, 2)
    23
    >>> ltruncate_int(1234, 5)
    1234
    """
    if ndigits < 1:
        raise ValueError('ndigits must be >= 1, got {}'.format(ndigits))
    return int(str(value)[-ndigits:])
def copy_scope(scope):
    """Return a copy of a scope: a new set holding a copy of each equation."""
    return {equation.copy() for equation in scope}
def check_connect_4_board(board: list):
    """Validate that a Connect-4 position is reachable by legal play.

    ``board`` is a 2-D grid (row 0 at the top) whose cells hold 'R', 'Y'
    or anything else for empty. R moves first; players alternate, each
    tile falling to the lowest unfilled position in its column. The check
    verifies the tile counts and then "unwinds" the game: it repeatedly
    removes a top-of-column tile of the colour that must have moved last.
    If the whole board can be unwound, the position is reachable.

    Limitations: stopping-after-a-win analysis and floating-tile detection
    are not performed, so some impossible positions (e.g. two simultaneous
    winning lines) are reported as valid.

    Fixes:
    - the two count-validation ``if not (...)`` checks contradicted each
      other, making the function return False for EVERY board; they are now
      a single correct check (valid iff count_r == count_y or
      count_r == count_y + 1).
    - empty columns no longer raise IndexError during unwinding.
    """
    rows = len(board)
    cols = len(board[0])
    count_r = count_y = 0
    # Per-column stacks; stack[-1] is the topmost tile of the column.
    stacks = {col: [] for col in range(cols)}
    for col in range(cols):
        for row in range(rows):
            tile = board[row][col]
            if tile == 'R':
                count_r += 1
                stacks[col].insert(0, 'R')
            elif tile == 'Y':
                count_y += 1
                stacks[col].insert(0, 'Y')

    # R starts, so R has either the same number of tiles as Y (Y moved
    # last) or exactly one more (R moved last).
    if count_r not in (count_y, count_y + 1):
        return False

    remove = 'R' if count_r > count_y else 'Y'
    count = count_r + count_y
    while count > 0:
        for col in range(cols):
            stack = stacks[col]
            if stack and stack[-1] == remove:
                del stack[-1]
                remove = 'R' if remove == 'Y' else 'Y'
                count -= 1
                break
        else:
            # No column exposes a tile of the required colour: impossible.
            return False
    return True
def _comment_ipython_line_magic(line, magic): """Adds suffix to line magics: # [magic] %{name} Converts: %timeit x = 1 Into: x = 1 # [magic] %timeit """ return line.replace(magic, '').strip() + f' # [magic] {magic.strip()}'
def point_dividing_a_line_segment(A, B, offset_from_A):
    """Return the integer point on segment A--B at a fractional offset from A.

    :param A: start point in 2D space ([x, y] or (x, y))
    :param B: end point in 2D space ([x, y] or (x, y))
    :param offset_from_A: fraction of the A-to-B distance, 0.0 .. 1.0
    :return: rounded (x, y) of the point C with A--C------B, where
        |AC| = |AB| * offset_from_A
    :rtype: tuple
    """
    t = offset_from_A
    px = (1 - t) * A[0] + t * B[0]
    py = (1 - t) * A[1] + t * B[1]
    return int(round(px)), int(round(py))
def get_conv_gradinputs_shape_1axis(
    kernel_shape, top_shape, border_mode, subsample, dilation
):
    """Compute the image shape of convolution gradInputs along one axis.

    The image shape can only be computed exactly when subsample is 1;
    otherwise None is returned.

    Parameters
    ----------
    kernel_shape: int or None. Kernel shape on this axis; None if undefined.
    top_shape: int or None. Top shape on this axis; None if undefined.
    border_mode: string, int or tuple of 2 ints. One of 'valid', 'half',
        'full', a symmetric padding amount, or an (left, right) pair.
    subsample: int. Subsampling on this axis.
    dilation: int. Dilation on this axis.

    Returns
    -------
    image_shape: int or None. Input image shape on this axis; None if
        undefined.
    """
    if None in [kernel_shape, top_shape, border_mode, subsample, dilation]:
        return None
    if subsample != 1:
        return None

    # Implicit dilated kernel shape.
    dil_kernel_shape = (kernel_shape - 1) * dilation + 1

    if border_mode == "half":
        pad_l = pad_r = dil_kernel_shape // 2
    elif border_mode == "full":
        pad_l = pad_r = dil_kernel_shape - 1
    elif border_mode == "valid":
        pad_l = pad_r = 0
    elif isinstance(border_mode, tuple):
        pad_l, pad_r = border_mode
    else:
        pad_l = pad_r = border_mode
    if pad_l < 0 or pad_r < 0:
        raise ValueError("border_mode must be >= 0")

    # In case of symbolic shape, build the smallest graph:
    # image_shape = (top_shape - 1) * s - 2 * pad + dil_kernel_shape + a
    # with 0 <= a < subsample, but subsample == 1 has been checked above.
    image_shape = top_shape + dil_kernel_shape - 1
    if pad_l > 0:
        image_shape -= pad_l
    if pad_r > 0:
        image_shape -= pad_r
    return image_shape
def _make_values(count: int) -> list: """Helper method that returns a list with 'count' number of items""" return [f"item {i}" for i in range(1, count + 1)]
def euler(before, rhs, dt):
    """One forward-Euler time step.

    rhs: right hand side of the ODE; dt: step size.
    """
    increment = rhs * dt
    return before + increment
def rr_and(x, y, nx, ny):
    """Dimensionless production rate for a gene repressed by two repressors
    with AND logic and no leakage.

    Parameters
    ----------
    x : float or NumPy array
        Concentration of first repressor.
    y : float or NumPy array
        Concentration of second repressor.
    nx : float
        Hill coefficient for first repressor.
    ny : float
        Hill coefficient for second repressor.

    Returns
    -------
    output : NumPy array or float
        1 / (1 + x**nx) / (1 + y**ny)
    """
    denom_x = 1.0 + x ** nx
    denom_y = 1.0 + y ** ny
    return 1.0 / denom_x / denom_y
def correct_letter_input(g1, l1_g):
    """Keep prompting until the guess is a single alphabetic character.

    Args:
        g1: the current guess, already upper-cased.
        l1_g: the length of ``g1``.

    Returns:
        The first guess (upper-cased) that is exactly one letter.

    Cleanup: the original had two byte-identical re-prompt branches; they
    are collapsed into a single loop condition, and the redundant ``str()``
    around ``input()`` is removed. Prompt strings are unchanged.
    """
    while not (g1.isalpha() and l1_g == 1):
        print("illegal format.")
        g1 = input("Your guess: ").upper()
        l1_g = len(g1)
    return g1
def get_tag(content, tag):
    """Return the double-quoted value of ``tag=`` within ``content``.

    ``content`` may be str or bytes (bytes are decoded as UTF-8, ignoring
    errors). Returns None when the tag is not present.
    """
    if isinstance(content, bytes):
        content = content.decode('utf-8', 'ignore')
    prefix = tag + '='
    for token in content.split(' '):
        if token.startswith(prefix):
            # token looks like tag="value"; the value is the next-to-last
            # piece after splitting on double quotes.
            return token.split('"')[-2]
    return None
def joinmixedtypes(data, separator):
    """Join iterable data of mixed types into a separator-delimited string.

    Args:
        data (iter): Iterable data.
        separator (str): Separator.

    Returns:
        str: Joined data by separator.

    Fix: the previous accumulator treated any item whose string form was
    empty as "no output yet" and dropped the separator before the next
    item; ``str.join`` now places exactly one separator at every boundary
    (and avoids the quadratic ``+=`` loop).
    """
    return separator.join(str(item) for item in data)
def changeLevBodyHead(VO, h, levDelta):
    """Increase or decrease the level number of a Body headline by levDelta.

    The level is the first group of ``VO.marker_re`` within ``h``.

    NOTE: markup modes can replace this function with
    hook_changeLevBodyHead.
    """
    if levDelta == 0:
        return h
    match = VO.marker_re.search(h)
    new_level = int(match.group(1)) + levDelta
    return '{}{}{}'.format(h[:match.start(1)], new_level, h[match.end(1):])
def format_sample_name(n):
    """Replace characters that are unsafe in sample names (* : /)."""
    translation = str.maketrans({'*': '_', ':': '-', '/': '-'})
    return n.translate(translation)
def _get_host_key_map(known_host_lines): """ See https://man.openbsd.org/sshd.8#SSH_KNOWN_HOSTS_FILE_FORMAT :param known_host_lines: lines in known_hosts format :return: dict((hostname, key_type) -> key """ keys_by_host = {} for line in known_host_lines: if not line: continue csv_hosts, key_type, key = line.split(' ') # filter out empty hosts ('[]:port' is empty host with non-standard port) hosts = [h for h in csv_hosts.split(',') if h and '[]' not in h] for host in hosts: keys_by_host[(host, key_type)] = key return keys_by_host
def insert_sort(a_list):
    """
    >>> insert_sort([1, 12, 2, 4, 6, 15, 7])
    [1, 2, 4, 6, 7, 12, 15]
    """
    # Sorts in place and returns the same list object.
    if len(a_list) <= 1:
        return a_list
    for idx in range(1, len(a_list)):
        current = a_list[idx]
        pos = idx
        # Shift larger elements one slot right until the insertion
        # point for `current` is found.
        while pos > 0 and a_list[pos - 1] > current:
            a_list[pos] = a_list[pos - 1]
            pos -= 1
        a_list[pos] = current
    return a_list
def is_binary_format(content, maxline=20):
    """
    parse file header to judge the format is binary or not
    :param content: file content in line list (bytes lines)
    :param maxline: maximum lines to parse
    :return: binary format or not
    """
    # Decision is made on the FIRST line mentioning 'format'; later
    # lines are not consulted.
    for line in content[:maxline]:
        if b'format' in line:
            return b'binary' in line
    return False
def double_quote(txt):
    """Double quote strings safely for attributes.

    Example:

    >>> double_quote('abc"xyz')
    '"abc\\"xyz"'
    """
    escaped = txt.replace('"', '\\"')
    return '"' + escaped + '"'
def _combine_histories(history1, history2):
    """Combine histories with minimal repeats.

    Appends to ``history1`` (space-separated words) those words of
    ``history2`` that do not already occur in ``history1`` as whole
    words. When a missing word is found, the following words of
    ``history2`` are also pulled in (and removed from the working list)
    for as long as they too are missing from ``history1`` — i.e. runs of
    new words are copied over together.

    NOTE(review): the inner loop deletes from ``hist2_words`` while the
    outer loop enumerates the same list, so consumed words are skipped
    by the outer loop; the exact output is order-sensitive.
    """
    hist2_words = history2.split(' ')
    add_hist = ''
    # Pad with spaces so membership tests match whole words only.
    test_hist1 = ' ' + history1 + ' '
    for i, word in enumerate(hist2_words):
        if ' ' + word + ' ' not in test_hist1:
            add_hist += ' ' + word
            keep_going = (i + 1 < len(hist2_words))
            while keep_going:
                # Absorb the run of subsequent words that are also new.
                if ((hist2_words[i + 1] == ' ') or
                        (' ' + hist2_words[i + 1] + ' ' not in test_hist1)):
                    add_hist += ' ' + hist2_words[i + 1]
                    # Remove the absorbed word so the outer enumerate
                    # does not visit it again.
                    del(hist2_words[i + 1])
                    keep_going = (i + 1 < len(hist2_words))
                else:
                    keep_going = False
    return history1 + add_hist
def busca_sequencial(lista, elemento):
    """Sequentially search an unordered list for an element.

    Args:
        lista: list of integers (unordered).
        elemento: value to search for.

    Returns:
        int: index of the first occurrence, or None (after printing a
        not-found message), matching the original behavior.
    """
    # The original looped with `contador <= len(lista)` and relied on the
    # resulting IndexError (caught via try/except) to signal "not found".
    # Iterating with enumerate removes the out-of-range access and the
    # exception-as-control-flow while keeping the observable behavior.
    for indice, valor in enumerate(lista):
        if valor == elemento:
            return indice
    print('Elemento nao achado')
def merge(*ds):
    """
    Merge together a sequence if dictionaries. Later entries
    overwrite values from earlier entries.

    >>> merge({'a': 'b', 'c': 'd'}, {'a': 'z', 'e': 'f'})
    {'a': 'z', 'c': 'd', 'e': 'f'}
    """
    if not ds:
        raise ValueError("Must provide at least one dict to merge().")
    # Later dicts win: the comprehension assigns keys in argument order.
    return {key: value for mapping in ds for key, value in mapping.items()}
def crossref_data_payload(
    crossref_login_id, crossref_login_passwd, operation="doMDUpload"
):
    """assemble a requests data payload for Crossref endpoint"""
    payload = {}
    payload["operation"] = operation
    payload["login_id"] = crossref_login_id
    payload["login_passwd"] = crossref_login_passwd
    return payload
def baseB(sum, b, k):
    """Convert ``sum`` to base ``b`` as a tuple of ``k`` digits,
    most significant digit first."""
    digits = []
    for _ in range(k):
        digits.append(sum % b)
        sum //= b
    return tuple(reversed(digits))
def get_closest_value(values, value) -> int:
    """Get the closest value

    In a list of values get the closest one

    Args:
        values (list): The list to get the closest.
        value (int): The value we would like to come close to>

    Return:
        (int): The closest value
    """
    def distance(candidate):
        return abs(candidate - value)

    # min is stable: on ties the earliest element wins, as before.
    return min(values, key=distance)
def residual_imag(im, fit_re, fit_im):
    """
    Relative Residuals as based on Boukamp's definition

    Ref.:
        - Boukamp, B.A. J. Electrochem. SoC., 142, 6, 1885-1894

    Kristian B. Knudsen (kknu@berkeley.edu || kristianbknudsen@gmail.com)
    """
    # Normalize the imaginary-part residual by the modulus of the fit.
    modulus_fit = (fit_re ** 2 + fit_im ** 2) ** 0.5
    return (im - fit_im) / modulus_fit
def flatten(this_list):
    """Flatten nested lists inside a list.

    Args:
        this_list: a list whose entries may themselves be (nested) lists.

    Returns:
        list: all non-list leaf values, in original order.
    """
    values = []
    for entry in this_list:
        if isinstance(entry, list):
            # Fix: the original did `return flatten(entry)` here, which
            # discarded everything collected so far plus all remaining
            # entries; extend keeps the full, in-order flattening.
            values.extend(flatten(entry))
        else:
            values.append(entry)
    return values
def _placeholder(dirname): """String of of @'s with same length of the argument""" return '@' * len(dirname)
def block_name(block_id):
    """Returns the scope name for the network block `block_id`."""
    return 'progressive_gan_block_' + '{}'.format(block_id)
def make_word_groups(vocab_words: list):
    """Transform a list containing a prefix and words into a string with
    the prefix followed by the words with prefix prepended.

    :param vocab_words: list - of vocabulary words with prefix in first index.
    :return: str - of prefix followed by vocabulary words with
            prefix applied, separated by ' :: '.

    For example: list('en', 'close', 'joy', 'lighten'),
    produces the following string: 'en :: enclose :: enjoy :: enlighten'.
    """
    prefix = vocab_words[0]
    pieces = []
    for word in vocab_words:
        # The prefix entry itself (and any word equal to it) is kept as-is.
        if word == prefix:
            pieces.append(prefix)
        else:
            pieces.append(prefix + word)
    return " :: ".join(pieces)
def B1_rel_diff(v0w, b0w, b1w, v0f, b0f, b1f, config_string, prefact,
                weight_b0, weight_b1):
    """
    Returns the reletive difference in the derivative of the bulk modulus.
    THE SIGNATURE OF THIS FUNCTION HAS BEEN CHOSEN TO MATCH THE ONE OF ALL
    THE OTHER FUNCTIONS RETURNING A QUANTITY THAT IS USEFUL FOR COMPARISON,
    THIS SIMPLIFIES THE CODE LATER. Even though several inputs are useless here.
    """
    # 2 * (difference) / (sum) scaled by prefact; only b1w/b1f are used.
    difference = b1w - b1f
    return prefact * 2 * difference / (b1w + b1f)
def filter_functions(input_set, filter_set):
    """
    Keeps only elements in the filter set (case-insensitive match).
    :param input_set: mapping of names to values
    :param filter_set: names to keep
    :return: filtered mapping (original key casing preserved)
    """
    allowed = {name.lower() for name in filter_set}
    return {name: input_set[name] for name in input_set
            if name.lower() in allowed}
def _str_dict(d): """Return a string representing dict of strings to strings.""" return " ".join([f"{k}={v}" for k, v in d.items()]) if d else "[none]"
def seglength(pyccd_result, ordinal, series_start):
    """
    The number of days since the beginning of the segment that the ordinal
    intersects with.

    The days between and around segments identified through the change
    detection process comprise valid segments for this. This why we need to
    know when the actual start ordinal, as the segments identified through
    change detection might not include it.

    Defaults to 0 in cases where the given ordinal day to calculate from is
    either < 1 or is before the start of the time series.

    Args:
        pyccd_result: dict return from pyccd
        ordinal: ordinal day to calculate from
        series_start: ordinal day when the change detection was started from

    Returns:
        int
    """
    if ordinal <= 0:
        return 0
    # Candidate segment boundaries: the series start plus every segment's
    # start and end day.
    boundary_dates = [series_start]
    for model in pyccd_result['change_models']:
        boundary_dates.extend((model['start_day'], model['end_day']))
    # Distance to the closest boundary that precedes `ordinal`.
    deltas = [ordinal - d for d in boundary_dates if ordinal > d]
    return min(deltas) if deltas else 0
def serviceCat(service):
    """compute the path to this __service manifest path"""
    return 'Service/%s' % (service,)
def contain_disambig_symbol(phones):
    """Return True if the phone sequence contains disambiguation symbol.
    Return false otherwise. Disambiguation symbol is at the end of phones
    in the form of #1, #2... There is at most one disambiguation symbol
    for each phone sequence"""
    # startswith already returns a bool; the original's
    # `True if ... else False` ternary was redundant.
    return phones[-1].startswith("#")
def get_dynamic_per_repository(repo):
    """
    Get all dynamic patterns in the repository.

    Keyword arguments:
        repo -- object containing properties of the repo
    """
    # dict.get covers both the "key present" and "key absent" (-> 0) cases.
    return repo['uniquePatterns'].get('DYNAMIC-PATTERN', 0)
def _decode_list(vals): """ List decoder """ return [val.decode() if hasattr(val, 'decode') else val for val in vals]
def jp_split(s):
    """Split/decode a string from json-pointer (RFC 6901).

    Args:
        s: JSON-Pointer string, or None.

    Returns:
        list: reference tokens with escape sequences decoded
        ('~1' -> '/', then '~0' -> '~'); [] for '' or None.
    """
    # Fix: comparison with None must use `is`, not `==`.
    if s == '' or s is None:
        return []

    def _decode(token):
        # Decode order matters: '~1' first, then '~0' (RFC 6901 §4),
        # so '~01' round-trips to '~1' and not '/'.
        return token.replace('~1', '/').replace('~0', '~')

    return [_decode(part) for part in s.split('/')]
def render_plugins(plugins, context, placeholder, processors=None):
    """
    Renders a collection of plugins with the given context, using the
    appropriate processors for a given placeholder name, and returns a list
    containing a "rendered content" string for each plugin.

    This is the main plugin rendering utility function, use this function
    rather than Plugin.render_plugin().
    """
    rendered = []
    total = len(plugins)
    for index, plugin in enumerate(plugins):
        # Each plugin gets its position within the collection before
        # rendering inside a pushed (then popped) context frame.
        plugin._render_meta.total = total
        plugin._render_meta.index = index
        context.push()
        rendered.append(
            plugin.render_plugin(context, placeholder, processors=processors))
        context.pop()
    return rendered
def evap_u(infil, pet_r, srz, rzsc, beta, ce):
    """evaporation from unsaturated soil

    Returns (eu, srz): actual evaporation and the updated root-zone
    storage after adding infiltration and removing evaporation.
    """
    storage = srz + infil
    # Demand is capped at the reference PET, then limited by the
    # available storage.
    demand = pet_r * min(storage / (rzsc * (1 + beta) * ce), 1)
    eu = min(demand, storage)
    return eu, storage - eu
def _http(base_uri, *extra): """Combine URL components into an http URL""" parts = [str(e) for e in extra] str_parts = ''.join(parts) str_base = str(base_uri) if str_base.startswith("http://"): return "{0}{1}".format(str_base, str_parts) elif str_base.startswith("https://"): return "{0}{1}".format(str_base.replace("https:","http:", 1), str_parts) else: return "http://{0}{1}".format(str_base, str_parts)
def _merge_sorted_nums_iter(sorted_nums1, sorted_nums2): """Helper method for merge_sort_iter(). Merge two sorted lists by iteration. """ # Apply two pointer method. i, j = 0, 0 result = [] for _ in range(len(sorted_nums1) + len(sorted_nums2)): if i < len(sorted_nums1) and j < len(sorted_nums2): if sorted_nums1[i] <= sorted_nums2[j]: result.append(sorted_nums1[i]) i += 1 else: result.append(sorted_nums2[j]) j += 1 elif i < len(sorted_nums1) and j >= len(sorted_nums2): result.extend(sorted_nums1[i:]) break elif i >= len(sorted_nums1) and j < len(sorted_nums2): result.extend(sorted_nums2[j:]) break return result
def mongo_stat(server, args_array, **kwargs):
    """Method: mongo_stat

    Description: Function stub holder for mongo_perf.mongo_stat.

    Arguments:
        (input) server
        (input) args_array
        (input) **kwargs
            class_cfg

    Returns:
        bool: always True (stub).
    """
    # Stub: the argument check mirrors the real call's shape, but the
    # outcome is unconditionally True.
    _ = bool(server and args_array and kwargs.get("class_cfg", True))
    return True
def conditional_pre_gpb_drop(df_occ_slice: list, df_meta_slice: list):
    """
    Drop samples from metadata dataframe slice depending on already reduced
    Occurrences slice (occ slice set up as basis for drop because it's the
    fastest to compute. Only runs if contents df_occ_slice have already been
    reduced.
    :param df_occ_slice: list of (file_code, df_occurrence_slice) tuples
    :param df_meta_slice: list of (file_code, df_meta_slice) tuples
    :return: reduced df_meta_slice (the same list object; its dataframes
             are filtered in place)
    """
    # Pair up slices by file code (nested loop over both lists).
    for df_code_i, df_slice_i in df_occ_slice:
        for df_code_j, df_slice_j in df_meta_slice:
            if df_code_i == df_code_j:
                # Sequences that survived the occurrence-slice reduction.
                seq_occ = df_slice_i.Sequence.values
                # In-place query mutates the dataframe inside
                # df_meta_slice; '@seq_occ' references the local above.
                df_slice_j.query("Sequence.isin(@seq_occ)", inplace=True)
    return df_meta_slice
def get_position_patterns(contours, hierarchy):
    """
    get qr code position pattern
    :param contours:
    :param hierarchy:
    :return: found position pattern index

    A contour qualifies when its child chain in `hierarchy` is nested
    at least 5 levels deep (hierarchy[k][2] is the first-child index,
    -1 meaning no child).
    """
    found = []
    for idx in range(len(contours)):
        depth = 0
        child = idx
        # Walk down the first-child chain, counting nesting depth.
        while hierarchy[child][2] != -1:
            child = hierarchy[child][2]
            depth += 1
        if depth >= 5:
            found.append(idx)
    return found
def simple_repos(ln, old_pos, new_pos):
    """
    Return a new list in which each item contains the index of the item
    in the old order. Uses the delete/insert approach (best for small
    arrays).
    """
    order = list(range(ln))
    # pop combines the original's read + delete into one call.
    moved = order.pop(old_pos)
    order.insert(new_pos, moved)
    return order
def get_value(string):
    """
    Returns only the numeric value from a cif item like 1.234(4).
    :parameters
        ** string ** A cif value as string
    :returns The value without error. (`float`)
    """
    if "(" in string:
        # Everything before the first '(' is the value; the rest is the
        # standard uncertainty and is discarded.
        head = string.partition("(")[0]
        return float(head) if head else 0.0
    try:
        return float(string)
    except ValueError:
        # Non-numeric input falls back to 0.0.
        return 0.0
def has_dependencies(node, dag):
    """Checks if the node has dependencies (appears downstream of any
    node in the DAG mapping)."""
    return any(node in downstream for downstream in dag.values())