content
stringlengths
42
6.51k
def get_next_match_pick_first_available(population):
    """Pick the first two available players in *population* as the next match.

    Iterates the list in order and returns a ``(player_a, player_b)`` tuple
    of the first two entries whose ``available`` attribute is truthy, or
    ``None`` when fewer than two players are available.
    """
    first = None
    for candidate in population:
        if not candidate.available:
            continue
        if first is None:
            first = candidate
        else:
            return first, candidate
def set_privacy(annotations, key, is_private=True, value_types=['longAnnos', 'doubleAnnos', 'stringAnnos']):
    """Set the privacy flag of one annotation in Synapse SubmissionStatus format.

    See the `Annotations documentation
    <http://rest.synapse.org/org/sagebionetworks/repo/model/annotation/Annotations.html>`_
    and the docs regarding `querying annotations
    <http://rest.synapse.org/GET/evaluation/submission/query.html>`_.

    :param annotations: Annotations already converted to Synapse format via
        :py:func:`to_submission_status_annotations`.
    :param key: The key of the annotation whose privacy we're setting.
    :param is_private: If False, the annotation is visible to users with READ
        permission on the evaluation; if True, only to users with
        READ_PRIVATE_SUBMISSION. Note: Is this really correct???
    :param value_types: A list of the value types in which to search for the
        key. Defaults to all types ['longAnnos', 'doubleAnnos', 'stringAnnos'].
    :return: the modified key/value-pair dict.
    :raises KeyError: if *key* is not present in any of the value types.
    """
    for value_type in value_types:
        for kvp in annotations.get(value_type) or []:
            if kvp['key'] == key:
                kvp['isPrivate'] = is_private
                return kvp
    raise KeyError('The key "%s" couldn\'t be found in the annotations.' % key)
def median(arr):
    """Return the median of the values in *arr*.

    Fixes two defects of the previous version: the caller's list is no longer
    mutated (a sorted copy is used instead of ``arr.sort()``), and even-length
    lists now return the mean of the two middle values instead of just the
    upper one.

    :param arr: The list of values
    :type arr: list
    :return: The median of all the numbers
    :rtype: float
    :raises ValueError: if *arr* is empty
    """
    if not arr:
        raise ValueError("median() arg is an empty list")
    ordered = sorted(arr)
    n = len(ordered)
    mid = n // 2
    if n % 2:
        return ordered[mid]
    # even length: average the two central values
    return (ordered[mid - 1] + ordered[mid]) / 2
def adaptMetricsInterval(metrics, interval):
    """Prefix cumulative metric names with the selected time interval.

    Fix: comparison with ``None`` now uses ``is None`` instead of ``==``.

    Parameters
    ----------
    metrics : list of str
        List of selected cumulative metrics.
    interval : str or None
        Time interval (daily, weekly, biweekly, monthly, or None).

    Returns
    -------
    list of str
        The corresponding metric names; only 'Cases', 'Deaths' and 'Tests'
        receive the interval prefix.
    """
    prefix = '' if interval is None else interval
    cumulative = ('Cases', 'Deaths', 'Tests')
    return [prefix + metric if metric in cumulative else metric
            for metric in metrics]
def ramsey_sequence(length, target):
    """Build a gate sequence to measure dephasing time on a two-qubit chip.

    Parameters
    ----------
    length : int
        Number of Identity gates between the two pi/2 rotations.
    target : str
        Which qubit is measured. Options: "left" or "right".

    Returns
    -------
    list
        Dephasing sequence: rx90p, *length* id gates, rx90p.
    """
    pulse = f"rx90p[{target}]"
    return [pulse] + ["id"] * length + [pulse]
def pipeline(ticker, years):
    """Coerce user input to the appropriate types: str ticker, int years."""
    return str(ticker), int(years)
def DivideAndCeil(dividend, divisor):
    """Returns ceil(dividend / divisor).

    Uses the exact integer identity ceil(a/b) == -floor(-a/b), avoiding the
    pitfalls of floating point arithmetic that could otherwise yield the
    wrong result for large numbers.

    Args:
      dividend: Dividend for the operation.
      divisor: Divisor for the operation.

    Returns:
      Quotient.
    """
    return -(-dividend // divisor)
def merge_dicts(src, dest):
    """Merge two dictionaries into a new one; *dest* wins on key collisions."""
    return {**src, **dest}
def construct_dict_from_source(fields, source):
    """Build a new dict from *source*, silently skipping missing keys.

    Each entry of *fields* maps a target field name to an extractor function
    that pulls the desired value out of the source dict; extractors that
    raise :class:`KeyError` are skipped.

    :param fields: target field name -> extractor function
    :type fields: dict
    :param source: Source dictionary.
    :type source: dict
    :return: Target dictionary.
    :rtype: dict
    """
    result = {}
    for name, extract in fields.items():
        try:
            result[name] = extract(source)
        except KeyError:
            continue
    return result
def _value_is_array(value): """Check if a parameter value is an array. Parameters ---------- value : str Parameter value as a string. Returns ------- bool ``True`` if the value is an array. Otherwise, ``False``. """ return "," in value
def _character_to_symbol(character): """ converts a single character to the mathematic symbol, does not apply all tex formatting """ if character == "T": return r"v_{\theta}" elif character == "W": return r"v_{z}" elif character == "R": return r"v_{r}" elif character == "U": return r"v_{x}" elif character == "V": return r"v_{y}" elif character == "t": return r"v_{\theta}^{\prime}" elif character == "w": return r"v_{z}^{\prime}" elif character == "r": return r"v_{r}^{\prime}" elif character == "u": return r"v_{x}^{\prime}" elif character == "v": return r"v_{y}^{\prime}" elif character == "M": return r"v_{magnitude}" elif character == "P": return r"v_{in-plane}" else: return character
def get_distinct_edge(edge_array):
    """Return distinct edges from an edge array of multiple graphs.

    >>> sorted(get_distinct_edge(edge_array))
    ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
    """
    return list({item[0] for row in edge_array for item in row})
def login_allowed(user):
    """@user_passes_test decorator predicate: may this user access the app?

    Non-UserBackend users are rejected (the LDAP entry is needed for the
    shares etc.), so we require the ``backend_user`` attribute. Anonymous
    users (None user or None username) are always rejected.
    """
    anonymous = user is None or user.get_username() is None
    return (not anonymous) and hasattr(user, 'backend_user')
def _format_print(string, prefix=""): """Inserts the given prefix at the beginning of each line""" if prefix: string = prefix + ("\n" + prefix).join(string.splitlines()) return string
def nice_pair(pair):
    """Make a nice string representation of a pair of numbers.

    Equal numbers render as a single value; otherwise the pair is joined
    with a dash, indicating the range.
    """
    start, end = pair
    return "%d" % start if start == end else "%d-%d" % (start, end)
def float_to_htk_int(string):
    """Convert a string of seconds to an integer HTK time (100 ns units)."""
    seconds = float(string)
    return int(round(seconds * 10000000))
def hexStr2Bytes(hexStr: str) -> bytes:
    """Decode a hexadecimal string into the bytes it represents.

    :param hexStr: The hexadecimal string
    :type hexStr: str
    :return: The bytes of the hexadecimal string
    :rtype: bytes
    """
    return bytes.fromhex(hexStr)
def get_new_hw(h, w, size, max_size):
    """Scale (h, w) so the short side equals *size*, capping the long side.

    The aspect ratio is preserved; if the resulting long side would exceed
    *max_size*, both sides are shrunk proportionally. Results are rounded to
    the nearest integer and returned as ``(new_w, new_h)``.
    """
    ratio = size * 1.0 / min(h, w)
    if h < w:
        new_h, new_w = size, ratio * w
    else:
        new_h, new_w = ratio * h, size
    longest = max(new_h, new_w)
    if longest > max_size:
        shrink = max_size * 1.0 / longest
        new_h = new_h * shrink
        new_w = new_w * shrink
    return int(new_w + 0.5), int(new_h + 0.5)
def bubblesort_core(x):
    """Bubble-sort *x* in place, counting comparisons and swap assignments.

    Returns ``(x, conditionals, assignments)``: the sorted list, the number
    of comparisons performed, and the number of element assignments made by
    swaps (two per swap). A final verification pass asserts sortedness and
    is not counted.
    """
    assignments = 0
    conditionals = 0
    n = len(x)
    # n passes; each pass bubbles the largest remaining element to the end.
    for step in range(n):
        for i in range(n - 1 - step):
            conditionals += 1
            if x[i] > x[i + 1]:
                x[i], x[i + 1] = x[i + 1], x[i]
                assignments += 2
    # sanity check (uncounted)
    for i in range(n - 1):
        assert x[i] <= x[i + 1]
    return x, conditionals, assignments
def Gt(field, value):
    """Build a "greater than" search criterion.

    Examples: TLP > 2, customFields.cvss > 4.5, date > now.

    Arguments:
        field (value): field name
        value (Any): field value

    Returns:
        dict: JSON representation of the criterion

    ```python
    query = Gt('tlp', 1)
    ```
    produces
    ```json
    {"_gt": {"tlp": 1}}
    ```
    """
    return {'_gt': {field: value}}
def getindex(ndim, ind, strides):
    """Map a multi-dimensional index to its position in the flat list."""
    return sum(strides[axis] * ind[axis] for axis in range(ndim))
def zakharov(ind):
    """Zakharov function defined as:
    $$ f(x) = \\sum_{i=1}^n x_i^2 + (\\sum_{i=1}^n 0.5 i x_i)^2
              + (\\sum_{i=1}^n 0.5 i x_i)^4$$
    with a search domain of $-5 < x_i < 10, 1 \\leq i \\leq n$. The global
    minimum is at $f(x_1, ..., x_n) = f(0, ..., 0) = 0$.

    Fix: the weighted sum previously used 0-based indices
    (``range(len(ind))``), giving the first coordinate a zero weight and
    contradicting the formula above; ``enumerate(ind, start=1)`` restores
    the documented 1-based weights. The duplicated weighted-sum computation
    is also hoisted.

    Returns a one-element tuple (fitness,) as expected by e.g. DEAP.
    """
    sum_sq = sum(x ** 2. for x in ind)
    weighted = sum(0.5 * i * x for i, x in enumerate(ind, start=1))
    return sum_sq + weighted ** 2. + weighted ** 4.,
def get_flowcell_name_from_desc(description_dict, user_run_name):
    """Get the flowcell name from a parsed fastq description.

    Tries, in priority order, the keys ``flow_cell_id``, ``sample_id`` and
    ``sampleid``; falls back to the user-provided run name.

    Parameters
    ----------
    description_dict: dict
        Parsed dictionary created from the description of the fastq record.
    user_run_name: str
        The user run name given on the command line.

    Returns
    -------
    flowcell_name: str
        The flowcell name to use.
    """
    for key in ("flow_cell_id", "sample_id", "sampleid"):
        if key in description_dict:
            return description_dict[key]
    return user_run_name
def parse_details_url(record):
    """Construct a site detail URL from a record's location, when possible.

    > Site URLs can be similarly constructed from some Services,
    > such as https://waterdata.usgs.gov/nwis/uv/?site_no=14113000, when
    > ServCode = "NWISDV" and location = "NWISDV:14113000"

    Ref: https://github.com/WikiWatershed/model-my-watershed/issues/1931

    Returns None when the location does not match a known service pattern.
    """
    templates = {
        'NWISDV': 'https://waterdata.usgs.gov/nwis/dv/?site_no={}',
        'NWISUV': 'https://waterdata.usgs.gov/nwis/uv/?site_no={}',
        'NWISGW': ('https://nwis.waterdata.usgs.gov/' +
                   'usa/nwis/gwlevels/?site_no={}'),
        'EnviroDIY': 'http://data.envirodiy.org/sites/{}/',
    }
    location = record['location']
    if any(code in location for code in templates):
        parts = location.split(':')
        if len(parts) == 2:
            code, site_id = parts
            template = templates.get(code)
            if template:
                return template.format(site_id)
    return None
def rough_calibration(pis, mission):
    """Roughly convert PI channels to energies for a known mission.

    Parameters
    ----------
    pis: float or array of floats
        PI channels in data
    mission: str
        Mission name (case-insensitive)

    Returns
    -------
    energies : float or array of floats
        Energy values

    Examples
    --------
    >>> rough_calibration(0, 'nustar')
    1.6
    >>> # It's case-insensitive
    >>> rough_calibration(1200, 'XMm')
    1.2
    >>> rough_calibration(10, 'asDf')
    Traceback (most recent call last):
        ...
    ValueError: Mission asdf not recognized
    >>> rough_calibration(100, 'nicer')
    1.0
    """
    converters = {
        "nustar": lambda p: p * 0.04 + 1.6,
        "xmm": lambda p: p * 0.001,
        "nicer": lambda p: p * 0.01,
    }
    key = mission.lower()
    if key in converters:
        return converters[key](pis)
    raise ValueError(f"Mission {key} not recognized")
def roundToMultiple(x, y):
    """Return the largest multiple of y <= x.

    Fix: the previous implementation used bit masking
    (``(x + y//2) & ~(y-1)``), which is only correct when *y* is a power of
    two; plain floor division handles any positive multiplier and matches
    the documented contract.

    Args:
        x (int): the number to round
        y (int): the multiplier (must be positive)

    Returns:
        int: largest multiple of y <= x
    """
    return (x // y) * y
def _is_c2d_topic(split_topic_str):
    """Return True when the received topic is a cloud-to-device (c2d) topic.

    Topics for c2d message are of the following format:
    devices/<deviceId>/messages/devicebound

    :param split_topic_str: The already split received topic string

    NOTE(review): despite the parameter name, the membership test below is a
    substring check if a plain string is passed, but an exact-element check
    if a list of path segments is passed (in which case the compound segment
    "messages/devicebound" would never match). The length guard similarly
    means characters for a string and segments for a list -- confirm the
    caller passes a string here.
    """
    if "messages/devicebound" in split_topic_str and len(split_topic_str) > 4:
        return True
    return False
def get_sql_result_as_dict_list(res):
    """Return the result set as a list of dicts, one per row.

    Args:
        res: (object): The sql result set

    Returns:
        A list of dicts keyed by column name; empty list for a falsy result.
    """
    if not res:
        return []
    col_names = res.get_column_names()
    return [
        {name: row.get_field(name) for name in col_names}
        for row in res.fetch_all()
    ]
def binHits(hitMap):
    """Invert a read->hit(s) map into a hit->list-of-reads map.

    A read's hit may be a single assignment or a list of assignments; either
    way the read is appended to every corresponding bin.
    """
    bins = {}
    for read, hit in hitMap.items():
        targets = hit if isinstance(hit, list) else [hit]
        for target in targets:
            bins.setdefault(target, []).append(read)
    return bins
def _value_checker(index_input): """Helper function to input check the main index functions""" if index_input == "": # empty string, default index return "default" try: return float(index_input) except ValueError: return False
def prepare_commands(commands):
    """Convert command objects into Eapi-formatted dicts.

    Each output dict carries the stripped command text under "cmd" and the
    command's answer (empty string when absent) under "input".
    """
    return [
        {"cmd": command.cmd.strip(), "input": command.answer or ""}
        for command in commands
    ]
def lerp(a, b, t):
    """Return the linear interpolation between a and b at time t in 0.0-1.0.

    Values of t outside the range are clamped to the endpoints.
    For example: lerp(100, 200, 0.5) => 150.
    """
    if t > 1.0:
        return b
    if t < 0.0:
        return a
    return a + (b - a) * t
def getbool(value, default=None, truevalues=set((True, 1, '1', 't', 'true', 'True')), falsevalues=set((False, 0, '0', 'f', 'false', 'False'))):
    """Convert a value to True, False, or a default.

    Membership in *truevalues* yields True, membership in *falsevalues*
    yields False; anything else yields *default*. The default sets are never
    mutated.
    """
    if value in truevalues:
        return True
    if value in falsevalues:
        return False
    return default
def sorted_by_key(x, i, reverse=False):
    """Return the items of *x* sorted by the element at position *i*."""
    return sorted(x, key=lambda element: element[i], reverse=reverse)
def longest_positive_sequence(the_list):
    """Return the length of the longest run of consecutive positive numbers.

    If it returns 3, then there exist 3 consecutive positive numbers.

    Fixes relative to the previous version: the debug ``print`` calls are
    removed, an empty list no longer raises IndexError (it returns 0), and
    the O(N) scratch list is replaced by a running counter (O(1) space).

    @param the_list: an array of integers
    @complexity: best and worst case O(N), N = len(the_list) -- one pass
    @return: longest count of consecutive positive numbers
    """
    longest = current = 0
    for value in the_list:
        # extend the current run on a positive value, otherwise reset it
        current = current + 1 if value > 0 else 0
        if current > longest:
            longest = current
    return longest
def _fingerprint(row): """Generate a string-based fingerprint to characterize row diversity.""" return ''.join(map(lambda x: str(type(x)), row))
def remove_html_tags(text): """Remove html tags from a string""" import re # make semi colon after list element text = text.replace('</li>', ";") # clean other tags clean = re.compile('<.*?>') return re.sub(clean, ' ', text)
def size_increment(added_content, deleted_content):
    """Return the absolute difference between the amounts of added and
    deleted content.

    Useful to identify deletion vandalism, where a vandal can remove data in
    large amounts.

    NOTE(review): the original docstring described this as a "ratio of added
    to deleted content", but the code computes
    ``abs(len(added) - len(deleted))`` -- an absolute difference, not a
    ratio. Documented as implemented; confirm which was intended.

    :param added_content: content added to wiki by an user
    :param deleted_content: content deleted from wiki by an user
    :return: absolute difference of the two content lengths
    """
    return abs(len(added_content) - len(deleted_content))
def _gen_table_cols(col_ids): """Generate Dash table columns in the expected format. :param col_ids: list of columns; must be in format <table-alias.name>, like "s.serial_number", as in the SQL select statement -- except for derived column values which must literally use "alias." plus the name. :return: List of dictionaries, where each contains an 'id' and a 'name' key for a Dash DataTable. """ col_list = [] for col in col_ids: col_dict = {} split_col = col.partition('.') if split_col[0] == 'alias': col_dict['id'] = 'alias_{}'.format(split_col[2]) else: col_dict['id'] = split_col[2] col_dict['name'] = split_col[2] col_list.append(col_dict) return col_list
def deep_len(lst):
    """Returns the deep length of the list.

    >>> deep_len([1, 2, 3])     # normal list
    3
    >>> x = [1, [2, 3], 4]      # deep list
    >>> deep_len(x)
    4
    >>> x = [[1, [1, 1]], 1, [1, 1]]    # deep list
    >>> deep_len(x)
    6
    """
    if type(lst) != list:
        # non-list inputs contribute nothing
        return 0
    return sum(deep_len(elem) if type(elem) == list else 1 for elem in lst)
def run_is_big_enough(s, e, bankend):
    """Check whether a run is big enough to consider.

    A run of $FF or $00 is OK if it's 32 bytes or longer, or if it's 10
    bytes or longer and touches the reset vectors near *bankend*.
    """
    long_enough = (e - s) >= 32
    touches_vectors = s <= bankend - 15 and e >= bankend - 6
    return long_enough or touches_vectors
def get_connected_devices(device, data):
    """Get all devices connected to *device* according to *data*.

    Returns an empty list when the device is unknown; otherwise collects the
    "device" entry of every interface that has one.
    """
    interfaces = data.get(device, {})
    return [
        info["device"]
        for info in interfaces.values()
        if "device" in info
    ]
def group_name_to_team_name(group_name):
    """Return the team name corresponding to Keycloak's `group_name`.

    Team groups carry a "TEAM-" prefix which is stripped; any other group
    name is returned unchanged.
    """
    prefix = "TEAM-"
    if group_name.startswith(prefix):
        return group_name[len(prefix):]
    return group_name
def typify(tokens):
    """Count the frequency of each unique type (word) in *tokens*.

    Note: despite the previous ``:rtype: dict`` claim, this function has
    always returned the result of ``sorted(dict.items())`` -- a **sorted
    list of (type, frequency) tuples**. The documentation is corrected here
    rather than the behavior, so existing callers keep working.

    :param tokens: a list of tokens
    :type tokens: list
    :return: (type, frequency) pairs sorted by type
    :rtype: list
    """
    frequencies = {}
    for word in tokens:
        frequencies[word] = frequencies.get(word, 0) + 1
    return sorted(frequencies.items())
def _prepare_dict_inputs(inputs, tensor_info_map): """Converts inputs to a dict of inputs and checks extra/missing args. Args: inputs: inputs fed to Module.__call__(). tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo` describing the signature inputs. Returns: A dict of values with the same keys as tensor_info_map. Raises: TypeError: If it fails to convert the input values into a dict of tensors to feed to the signature instantiation. """ if inputs is None: dict_inputs = {} elif isinstance(inputs, dict): dict_inputs = inputs elif len(tensor_info_map) == 1: dict_inputs = {list(tensor_info_map.keys())[0]: inputs} elif not tensor_info_map: raise TypeError("Signature expects no inputs.") else: raise TypeError("Signature expects multiple inputs. Use a dict.") dict_inputs_keys = set(dict_inputs.keys()) tensor_info_map_keys = set(tensor_info_map.keys()) if dict_inputs_keys != tensor_info_map_keys: raise TypeError("Cannot convert dict_inputs: missing %r, extra given %r" % (sorted(list(tensor_info_map_keys - dict_inputs_keys)), sorted(list(dict_inputs_keys - tensor_info_map_keys)))) return dict_inputs
def list_set_bits(r, expected_length):
    """Return list of positions of bits set to one in given data.

    This method is used to read e.g. violated zones. They are marked by ones
    on respective bit positions - as per Satel manual. The first byte of *r*
    is a header and is skipped; positions are numbered from 1, LSB first.
    """
    assert (len(r) == expected_length + 1)
    positions = []
    bit_number = 1
    for byte in r[1:]:
        for offset in range(8):
            if byte & (1 << offset):
                positions.append(bit_number)
            bit_number += 1
    return positions
def remove_plural_words (dictionary, lang):
    """
    `remove_plural_words()` removes from the dictionary every word that is
    already in the dictionary in singular form.

    * **dictionary** (*list*) : the input dictionary (while processing);
      mutated in place and also returned
    * **lang** (*str*) : the language used to follow plural rules (only `FR`
      is available yet)
    * **return** (*list*) : the dictionary without duplicated words in
      singular / plural forms

    NOTE(review): the language check compares against lowercase 'fr' while
    the doc advertises `FR` -- confirm callers pass lowercase. Empty-string
    words would raise IndexError on ``word[l-1]`` -- presumably never occur;
    verify upstream.
    """
    words_to_del = []
    if lang == 'fr':
        for word in dictionary:
            l = len(word)
            # French plural rules, each paired with a membership test for the
            # singular candidate:
            #   trailing 's'   : chats   -> chat
            #   trailing 'x'   : jeux    -> jeu (approximation)
            #   'aux' -> 'al'  : chevaux -> cheval
            #   'aux' -> 'ail' : travaux -> travail
            if word[l-1] == 's' and word[:l-1] in dictionary \
                    or word[l-1] == 'x' and word[:l-1] in dictionary \
                    or l > 3 and word[l-3:l] == 'aux' and word[:l-3]+'al' in dictionary \
                    or l > 3 and word[l-3:l] == 'aux' and word[:l-3]+'ail' in dictionary:
                words_to_del.append(word)
    # deduplicate so list.remove() is called exactly once per plural word
    words_to_del = set(words_to_del)
    for word in words_to_del:
        dictionary.remove(word)
    # HERE insert plural rules from other languages
    return (dictionary)
def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
    """Mangles a trace name plus optional chart name into a standard string.

    A value might just be a bareword name, e.g. numPixels, with chart None.
    A value intended for display with others carries its chart name as a
    grouping prefix: screen.numPixels, screen.resolution with
    chartName='screen'.
    """
    assert trace_name != 'url', 'The name url cannot be used'
    if not chart_name:
        assert '.' not in trace_name, ('Trace names cannot contain "." with an '
            'empty chart_name since this is used to delimit chart_name.trace_name.')
        return trace_name
    return '%s.%s' % (chart_name, trace_name)
def f_beta(precision, recall, beta = 1):
    """Compute the F-beta score from precision and recall.

    Fix: when both precision and recall are 0 the previous version raised
    ZeroDivisionError; by the usual convention F-beta is defined as 0.0 in
    that case.

    :param precision: precision in [0, 1]
    :param recall: recall in [0, 1]
    :param beta: weight of recall relative to precision (default 1 gives F1)
    :return: the F-beta score as a float
    """
    beta = float(beta)  # Make sure that results are in float
    beta_sq = beta ** 2
    denominator = beta_sq * precision + recall
    if denominator == 0:
        return 0.0
    return (1 + beta_sq) * (precision * recall) / denominator
def split(number, portion=0.9):
    """Split a dataset size into train and validation counts.

    Fix: the previous version rounded both parts independently, so the
    counts could fail to sum to *number* -- e.g. for number=5, portion=0.9,
    banker's rounding gives round(4.5) + round(0.5) == 4 + 0 == 4. The
    validation count is now derived as the exact remainder.

    :param number: Int / number of samples in dataset
    :param portion: Float / fraction of samples that go into the train set
    :return: list of Int / [train_count, val_count], summing to *number*
    """
    train_count = round(portion * number)
    return [train_count, number - train_count]
def xp_calculation(length: int):
    """Calculate the XP awarded for a message of the given length.

    :param length: message length in characters
    :return: the XP value
    """
    if length <= 10:
        return 0.1
    if length <= 200:
        # linear ramp between the short-message floor and the 200-char peak
        return ((length / 200) * 2.5) + 0.5
    if length <= 400:
        return 2.5
    if length <= 600:
        return 2
    if length <= 800:
        return 1.5
    if length <= 1000:
        return 1
    return 0
def to_one_dimensional_array(iterator):
    """Flatten one level: list elements are spliced in, others appended."""
    flat = []
    for element in iterator:
        if type(element) == list:
            flat.extend(element)
        else:
            flat.append(element)
    return flat
def floor_lg(n: int) -> int:
    """Return floor(log_2(n)) for a positive integer `n`.

    Uses int.bit_length(): for n > 0, bit_length() - 1 is exactly the
    position of the highest set bit, i.e. floor(log2(n)).
    """
    assert n > 0
    return n.bit_length() - 1
def get_command_from_state(state):
    """Map a desired state to the corresponding vrouter command name.

    :param state: The state for which the respective command name is
        required ('present', 'absent' or 'update').
    :return: the command name, or None for an unknown state.
    """
    commands = {
        'present': 'vrouter-create',
        'absent': 'vrouter-delete',
        'update': 'vrouter-modify',
    }
    return commands.get(state)
def transform(data, ops=None):
    """Apply each op in *ops* to *data* in order.

    Stops and returns None as soon as any op yields None; otherwise returns
    the final transformed value.
    """
    for op in (ops or []):
        data = op(data)
        if data is None:
            return None
    return data
def db_to_lin(db):
    """Convert a gain in dB to a linear amplitude factor (20 dB per decade)."""
    exponent = db / 20.0
    return 10.0 ** exponent
def validate_port_number(port_number):
    """Check if the port number is within range.

    Fix: the previous check used ``range(0, 65535)``, whose upper bound is
    exclusive, so the valid port 65535 was wrongly rejected; a plain chained
    comparison covers 0..65535 inclusive.

    :param int port_number: The port number to check.
    :return: True if the port number is valid.
    :rtype: bool
    :raises ValueError: if the port number is invalid.
    """
    if not 0 <= port_number <= 65535:
        raise ValueError("Invalid port number.")
    return True
def serial_to_ring(x: int) -> int:
    """Convert serialized chamber id to ring (bits 6-7, reported 1-based)."""
    ring_bits = (x >> 6) & 0x3
    return ring_bits + 1
def collect_functions(functions, r=None):
    """Return functions for next values of latches.

    @param functions: `dict` as returned by `make_functions`
    @param r: optional dict updated in place; a fresh dict is created when
        None is given.
    """
    result = dict() if r is None else r
    result.update((var, d['function']) for var, d in functions.items())
    return result
def merge_lines(lines, start, join=" "):
    """Gets a single continuous string from a sequence of lines.

    :param list lines: The lines to merge.
    :param int start: The start point in each record.
    :param str join: The string to join on.
    :rtype: ``str``"""
    pieces = [line[start:].strip() for line in lines]
    return join.join(pieces)
def cvtHH(hhstr, pos=0):
    """
    Convert HH hex [sub]string value into integer.

    Fix: the previous implementation built an ``'0x..'`` string and passed
    it to ``eval()``; ``int(..., 16)`` parses the same two hex digits
    without executing arbitrary code. A string too short to hold two digits
    at *pos* now returns None instead of raising IndexError.

    Parameters:
      hhstr  - HH [sub]string  format: "....HH...."
      pos    - starting position in string. default: 0 (start of string)

    Return Value:
      Returns converted integer value on success. None on error.
    """
    digits = hhstr[pos:pos + 2]
    if len(digits) != 2:
        return None
    try:
        return int(digits, 16)
    except ValueError:
        return None
def add_device_info(mappings, session):
    """Annotate each mapping with the id of the device backing it.

    - If the mapping is related to a device, ``devices_id`` is that device's
      id.
    - If it is common space, it takes the id 0.
    - ! Not used here ! If it is part of a "memory hole", id is -1.

    Fix: the ``!= None`` comparison is replaced with the idiomatic
    ``is not None``.

    Input:
      - mappings : the list of mappings to annotate (mutated in place)
      - session : the database session (unused in this function, kept for
        interface compatibility)
    """
    for mapping in mappings:
        if mapping.device is not None:
            # Devices map exactly one segment for them specifically
            mapping.devices_id = mapping.device.id
        else:
            # Not related to a Device: part of the common space to be used
            mapping.devices_id = 0
    return mappings
def perimeterTriangle(side1: float, side2: float, base: float) -> float:
    """Return the perimeter of a triangle as the sum of its three sides."""
    return side1 + base + side2
def pixellate(resolution_x=320, resolution_y=240):
    """
    A sharp pixellation filter.

    Author: SolarLune
    Date Updated: 6/6/11

    resolution_x = target resolution on the X axis. Defaults to 320.
    resolution_y = target resolution on the Y axis. Defaults to 240.

    A larger X-axis resolution would equal a less blocky picture. Note that
    the pixellation is locked to whole numbers, so there's no way to get
    "1.5x" pixellation, so to speak. You should probably choose a resolution
    that's both rather small as well as a resolution that is a whole
    division of what you're going to be running the game at most likely
    (i.e. 320x240 on a 1280x960 game window, not 600x500 on a 800x600 game
    window).
    """
    # The GLSL below snaps each fragment's UV to the top-left corner of a
    # virtual "big pixel" of size ceil(render_size / target_resolution),
    # then samples half a source pixel inside that corner for a crisp edge.
    # The resolution values are baked into the shader source as float
    # literals at call time, so each call builds a distinct shader string.
    return (""" uniform sampler2D bgl_RenderedTexture;
    uniform float bgl_RenderedTextureWidth;
    uniform float bgl_RenderedTextureHeight;
    void main(void)
    {
        vec2 uv = gl_TexCoord[0].xy;
        vec2 pixel = vec2(1.0 / bgl_RenderedTextureWidth, 1.0 / bgl_RenderedTextureHeight);
        int target_x = int(ceil(bgl_RenderedTextureWidth / """ + str(float(resolution_x)) + """));
        int target_y = int(ceil(bgl_RenderedTextureHeight / """ + str(float(resolution_y)) + """));
        float dx = pixel.x * target_x;
        float dy = pixel.y * target_y;
        vec2 coord = vec2(dx * floor(uv.x / dx), dy * floor(uv.y / dy));
        coord += pixel * 0.5;
        // Add half a pixel distance so that it doesn't pull from the pixel's edges,
        // allowing for a nice, crisp pixellation effect
        coord.x = min(max(0.001, coord.x), 1.0);
        coord.y = min(max(0.001, coord.y), 1.0);
        gl_FragColor = texture2D(bgl_RenderedTexture, coord);
    }
    """)
def incident_related_resource_data_to_xsoar_format(resource_data, incident_id):
    """Convert an incident relation from raw API format to XSOAR format.

    :param resource_data: (dict) The related resource raw data.
    :param incident_id: The incident id.
    :return: dict with ID, Kind and IncidentID keys.
    """
    properties = resource_data.get('properties', {})
    return {
        'ID': properties.get('relatedResourceName'),
        'Kind': properties.get('relatedResourceKind'),
        'IncidentID': incident_id,
    }
def clean_word(word):
    """Strip characters that are not allowed from *word*.

    Improvement: the chain of seven ``str.replace`` calls is replaced with a
    single ``str.translate`` pass deleting the same characters
    (newline, '=', '(', ')', '"', ',' and '.').

    Arguments:
        word {string} -- the word to be cleaned
    """
    return word.translate(str.maketrans('', '', '\n=()",.'))
def is_good(entry):
    """
    Good entries on a bropage have more upvotes than downvotes.

    Fix: the bare ``except:`` silently swallowed every exception (including
    KeyboardInterrupt/SystemExit); only the errors a malformed entry can
    actually produce -- a missing key or a non-mapping entry -- are treated
    as "good by default".
    """
    try:
        return entry["up"] >= entry["down"]
    except (KeyError, TypeError):
        return True
def binary_search(arr, key):
    """Return the index of *key* in the sorted list *arr*.

    Classic iterative binary search: halve the window [lo, hi] around the
    middle element until the key is found.

    :param arr: a sorted list of numbers
    :param key: the value to look for
    :return: position of the key in the array
    :raises ValueError: if the key is not in the array
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        pivot = arr[mid]
        if pivot == key:
            return mid
        if pivot < key:
            lo = mid + 1
        else:
            hi = mid - 1
    raise ValueError("Value not found")
def validate_ansible_playbook(response_dict):
    """
    Validate if ansible playbook ran OK.

    The run is considered successful when none of the failure markers
    appears in the ansible output.

    Returns:
        success = Bool
    """
    output = response_dict["ansible_output"]
    markers = ("FAILED", "ERROR", "WARNING")
    return not any(marker in output for marker in markers)
def unique_match_from_list(list):
    """
    Check the list for a potential pattern match.

    @param list : a list of potential matching groups (note: the parameter
        name shadows the builtin, kept for interface compatibility)
    @rtype : the string representation of the last non-None value in the
        list, or '' when every entry is None
    """
    result = ''
    for item in list:
        if item is None:
            continue
        result = str(item)
    return result
def strip_shacl_prefix(url: str) -> str:
    """Strip the shacl prefix and return value of the url.

    Improvement: the previous version sliced a magic 27 characters -- that
    is exactly ``len("http://www.w3.org/ns/shacl#")``, now named explicitly.
    Note the slice is applied unconditionally, exactly as before: inputs
    that do not start with the prefix also lose their first 27 characters.

    Args:
        url (str): String with shacl prefix.

    Returns:
        str: String after removing shacl prefix.
    """
    shacl_prefix = "http://www.w3.org/ns/shacl#"
    return str(url)[len(shacl_prefix):]
def parse_updates(updates_string):
    """Parse an updates string from a report into a sanitized version.

    The whitespace-separated fields are grouped three at a time, each group
    becoming one newline-terminated line.
    """
    fields = updates_string.split()
    return [
        '{0!s} {1!s} {2!s}\n'.format(fields[i], fields[i + 1], fields[i + 2])
        for i in range(0, len(fields), 3)
    ]
def value_of_card(card: str):
    """Determine the scoring value of a card.

    :param card: str - given card.
    :return: int - value of a given card. See below for values.

    1.  'J', 'Q', or 'K' (otherwise known as "face cards") = 10
    2.  'A' (ace card) = 1
    3.  '2' - '10' = numerical value.
    """
    face_values = {"J": 10, "Q": 10, "K": 10, "A": 1}
    if card in face_values:
        return face_values[card]
    return int(card)
def check_brack_o(count):
    """
    Helper function for balancing brackets.

    Returns 1 when no unmatched opening brackets remain (count <= 0);
    otherwise prints "No match" and returns 0.
    """
    if count <= 0:
        return 1
    print("No match")
    return 0
def is_virtual_column(col_id):
    """
    Returns whether col_id is of a special column that does not get
    communicated outside of the sandbox. Lookup maps are an example.
    Virtual columns are marked with a leading '#'.
    """
    return col_id[:1] == '#'
def like_rnncell(cell):
    """Checks that a given object is an RNNCell by using duck typing."""
    required_attrs = ("output_size", "state_size", "zero_state")
    return callable(cell) and all(hasattr(cell, attr) for attr in required_attrs)
def check_abs_diff(x0: float, x1: float, tol: float = 3) -> bool:
    """
    Check absolute difference between two numbers.

    Parameters
    ----------
    x0, x1: float
        the numbers to difference
    tol: float
        the tolerance (inclusive)

    Returns
    -------
    bool:
        True if the absolute difference is within the tolerance
    """
    difference = x0 - x1
    if difference < 0:
        difference = -difference
    return difference <= tol
def levenshtein_dynamic(s1: str, s2: str) -> int:
    """Return the minimum edit distance between strings s1 and s2.

    This function implements the Levenshtein distance algorithm using
    Dynamic Programming. Note: This function is not required by the
    levenshtein automaton, but I felt that it could be useful to
    illustrate the basic idea of the Levenshtein algorithm. This function
    is the same function as the levenshtein function in helpers.py.
    """
    dp = list(range(0, len(s2) + 1))  # dp stands for dynamic programming
    # technically, I can reduce len(dp) to min(len(s1), len(s2)), but its not necessary.
    for i in range(len(s1)):
        # Walk right-to-left so dp[d - 1] still holds the previous row's
        # value, i.e. the "diagonal" cell of the classic DP table.
        for d in range(len(dp) - 1, 0, -1):
            j = d - 1
            # min of: deletion (dp[d] + 1) and match/substitution
            # (diagonal + 0 or 1 depending on character equality).
            dp[d] = min(dp[d] + 1, dp[d - 1] + (s1[i] != s2[j]))
        dp[0] = i + 1
        # Second, left-to-right pass applies pending insertions.
        for d in range(1, len(dp)):
            dp[d] = min(dp[d], dp[d - 1] + 1)
        # print(dp)
    return dp[-1]
def escape(s):
    """Replace special characters '&', "'", '<', '>' and '"' by XML entities."""
    # '&' must be first so already-produced entities are not re-escaped.
    replacements = (
        ("&", "&amp;"),
        ("'", "&apos;"),
        ("<", "&lt;"),
        (">", "&gt;"),
        ('"', "&quot;"),
    )
    for char, entity in replacements:
        s = s.replace(char, entity)
    return s
def fuzzy_match_simple(pattern, instring):
    """Return True if each character in pattern is found in order in instring.

    The comparison is case-insensitive. Empty patterns or empty search
    strings never match.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :return: True if there is a match, False otherwise
    :rtype: ``bool``
    """
    p_len, s_len = len(pattern), len(instring)
    p_idx = 0
    for s_idx in range(s_len):
        if p_idx == p_len:
            break
        if pattern[p_idx].lower() == instring[s_idx].lower():
            p_idx += 1
    return p_len != 0 and s_len != 0 and p_idx == p_len
def to_tuple(param, low=None):
    """Convert a parameter to a tuple.

    - list/tuple inputs are returned as a tuple unchanged;
    - None is returned as-is;
    - a scalar with low=None becomes the symmetric range (-param, param);
    - a scalar with low set becomes (min, max) of (low, param).
    """
    if isinstance(param, (list, tuple)):
        return tuple(param)
    if param is None:
        return None
    if low is None:
        return -param, param
    return (low, param) if low < param else (param, low)
def variable_id(printer, ast):
    """Prints "varName [= initData]"."""
    # Assemble name, array-dimension brackets, then the optional initializer.
    parts = [ast["varName"]]
    for decl in ast["arrayDecl"]:
        parts.append(f'[{printer.ast_to_string(decl)}]')
    if ast.get("initData"):
        parts.append(f' = {printer.ast_to_string(ast["initData"])}')
    return ''.join(parts)
def _is_from_logout(request): """ Returns whether the request has come from logout action to see if 'is_from_logout' attribute is present. """ return getattr(request, 'is_from_logout', False)
def normalize_spaces(s):
    """replace any sequence of whitespace characters with a single space"""
    # split() with no argument collapses runs of any whitespace and
    # discards leading/trailing whitespace.
    tokens = s.split()
    return ' '.join(tokens)
def parseNeighbors_rank(urls):
    """Parses a urls pair string into urls pair.

    Expects a comma-separated record; returns (url, rank) where rank is
    field 2 divided by field 3.
    """
    fields = urls.split(',')
    rank = float(fields[2]) / float(fields[3])
    return fields[1], rank
def load_model(known_loaders, outname, **kwargs):
    """Load a model by name, dispatching on the optional ``model_type`` kwarg.

    :param known_loaders: Map of model-type name -> loader factory, typically
        a static factory method
    :param outname: The model name to load
    :param kwargs: Forwarded verbatim to the chosen loader.  May contain
        ``model_type`` (defaults to 'default'); when that type is unknown,
        ``kwargs['task_fn']`` is used as the loader instead.
    :return: A restored model
    """
    model_type = kwargs.get('model_type', 'default')
    try:
        loader_fn = known_loaders[model_type]
    except KeyError:
        # Unknown model type: fall back to the caller-supplied task loader.
        loader_fn = kwargs['task_fn']
    return loader_fn(outname, **kwargs)
def get_P_X_past_cond_X(past_symbol_counts, number_of_symbols):
    """
    Compute P(X_past | X), the probability of the past activity conditioned
    on the response X using the plug-in estimator.

    Returns a two-element list (one dict per response 0/1) mapping each past
    symbol to its relative frequency within that response class.
    """
    return [
        {symbol: count / number_of_symbols[response]
         for symbol, count in past_symbol_counts[response].items()}
        for response in (0, 1)
    ]
def change_app_header(uri, headers, body):
    """Add Accept header for preview features of Github apps API.

    Mutates ``headers`` in place and passes the request triple through.
    """
    preview_media_type = "application/vnd.github.machine-man-preview+json"
    headers["Accept"] = preview_media_type
    return uri, headers, body
def translate(old, translation_table):
    """Returns a new dictionary with keys based on translation_table.

    Keys present in ``translation_table`` are renamed; all other keys are
    copied through unchanged.  Values are never modified.
    """
    return {
        translation_table.get(field, field): value
        for field, value in old.items()
    }
def is_sequence(arg):
    """Check if an object is iterable (you can loop over it) and not a string."""
    # Strings (and bytes) expose .strip; everything else iterable qualifies.
    return hasattr(arg, "__iter__") and not hasattr(arg, "strip")
def get_foot_point(point, line_p1, line_p2):
    """Foot of the perpendicular from ``point`` onto the line through
    ``line_p1`` and ``line_p2`` (2D: only x/y components are used).

    @point, line_p1, line_p2 : [x, y, z]
    """
    px, py = point[0], point[1]
    ax, ay = line_p1[0], line_p1[1]
    bx, by = line_p2[0], line_p2[1]

    # A degenerate (zero-length) line has no well-defined foot point.
    assert not (ax == bx and ay == by), f"check line {line_p1}, {line_p2}"

    # Projection parameter of `point` onto the infinite line a->b.
    k = -((ax - px) * (bx - ax) + (ay - py) * (by - ay)) / ((bx - ax) ** 2 + (by - ay) ** 2) * 1.0

    foot_x = k * (bx - ax) + ax
    foot_y = k * (by - ay) + ay
    return (round(foot_x, 6), round(foot_y, 6))
def pmr_corr(vlos, r, d):
    """
    Correction on radial proper motion due to apparent contraction/expansion
    of the cluster.

    Parameters
    ----------
    vlos : float
        Line of sight velocity, in km/s.
    r : array_like, float
        Projected radius, in degrees.
    d : float
        Cluster distance from the Sun, in kpc.

    Returns
    -------
    pmr : array_like, float
        Correction in the radial component of the proper motion, in mas/yr.
    """
    # Convert projected radius from degrees to arcmin.
    r_arcmin = r * 60
    # Equation 4 from Bianchini et al. 2018.
    return -6.1363 * 1e-5 * vlos * r_arcmin / d
def if_(if_func, func, arg):
    """Apply ``func`` to ``arg`` only when ``if_func(arg)`` is truthy;
    otherwise return ``arg`` unchanged.
    """
    return func(arg) if if_func(arg) else arg
def offsets_transform(row_offset, col_offset, transform):
    """Calculating new geotransform for each segment boxboundary.

    Shifts the GDAL-style geotransform origin by ``col_offset`` pixels and
    ``row_offset`` rows; pixel sizes are kept, rotation terms are zeroed.
    """
    origin_x, pixel_w, _, origin_y, _, pixel_h = transform[:6]
    return [
        origin_x + col_offset * pixel_w,  # shifted top-left x
        pixel_w,
        0.0,
        origin_y + row_offset * pixel_h,  # shifted top-left y
        0.0,
        pixel_h,
    ]
def _normalize_typos(typos, replacement_rules): """ Applies all character replacement rules to the typos and returns a new dictionary of typos of all non-empty elements from normalized 'typos'. """ if len(replacement_rules) > 0: typos_new = dict() for key, values in typos.items(): typos_new[key] = list() for item in values: for orig, replacement in replacement_rules: item = item.replace(orig, replacement) item = item.strip() if item: typos_new[key].append(item) return typos_new else: return typos
def dfs_category_dictionary(categories, categories_df, parentId, count):
    """
    Depth-first traversal of a predefined hierarchical category tree,
    flattening it into the ``categories_df`` columns.

    :param categories: nested dict of categories — each key is a category
        name whose value holds its child categories; ``None`` marks a leaf.
    :param categories_df: dict of lists ('name', 'id', 'parentId') that
        collects one row per visited category.
    :param parentId: id of the parent category, or ``None`` for top-level
        categories (recorded as the string "NULL").
    :param count: next id to assign; ids are allocated in visit order.
    :return: the next unused id after this whole subtree has been processed
        (threaded back so siblings and ancestors continue the numbering).
    """
    if categories is None:
        return count

    # iterate each category at current level
    for category in categories.keys():
        # record one row for this category, using `count` as its id
        categories_df['name'].append(category)
        categories_df['id'].append(count)
        if parentId is None:
            categories_df['parentId'].append("NULL")
        else:
            categories_df['parentId'].append(parentId)
        # recurse into children with this node's id (`count`) as their parent
        # and `count + 1` as the next free id; the returned value becomes the
        # id for the next sibling, keeping ids globally unique
        count = dfs_category_dictionary(categories[category], categories_df, count, count + 1)

    # return final count at current level to parent category
    return count
def stringify_keys(d):  # adapted from https://stackoverflow.com/a/51051641
    """Recursively convert a dict's keys to strings if they are not.

    The dict is modified in place (nested dicts included) and returned for
    convenience.  Non-string keys are replaced with ``str(key)``, falling
    back to ``repr(key)`` if stringification fails.

    Bug fix vs. the original: the old key was popped for *every* entry, so
    keys that were already strings were silently deleted from the dict.
    Deletion now happens only after a replacement string key is installed.
    """
    # Snapshot the keys: we mutate `d` while iterating.
    for key in list(d.keys()):
        value = d[key]
        # Recurse into nested dicts first (stringify_keys mutates in place).
        if isinstance(value, dict):
            value = stringify_keys(value)
        if not isinstance(key, str):
            try:
                new_key = str(key)
            except Exception:
                # Last resort; if repr also fails, let the error propagate.
                new_key = repr(key)
            d[new_key] = value
            # Remove the old key only now that the replacement exists.
            d.pop(key, None)
    return d
def classify(s, data_set, suffixes=None):
    """
    Return True or some classification string value that evaluates to True
    if the data in string s is not junk.
    Return False if the data in string s is classified as 'junk' or
    uninteresting.
    """
    if not s:
        return False
    cleaned = s.lower().strip('/')
    # Junk if it contains any known junk marker, or ends with a junk suffix.
    has_junk_marker = any(marker in cleaned for marker in data_set)
    has_junk_suffix = bool(suffixes) and cleaned.endswith(suffixes)
    return not (has_junk_marker or has_junk_suffix)
def intersection(list_to_intersect):
    """Helper method to intersect lists.

    Keeps the order (and duplicates) of the first list, retaining only the
    values present in every subsequent list.
    """
    common = list_to_intersect[0]
    for idx in range(1, len(list_to_intersect)):
        other = list_to_intersect[idx]
        common = [item for item in common if item in other]
    return common
def print_error(message):
    """Return ``message`` wrapped in ANSI escape codes so it prints in red."""
    # {message!s} applies str() exactly like the original explicit call.
    return f'\033[31m{message!s}\033[0m'