content
stringlengths
42
6.51k
def fix_bayes_factor(bayes_factor):
    """Normalize a Bayes-factor value that may arrive as a string.

    If one of the bayes factors is 'inf' we get a string instead of a
    tuple back.  This is hacky but fixes that: the string is split on
    commas, each part is parsed as a float and capped, and the first
    capped value is returned.  Non-string inputs pass through unchanged.

    :param bayes_factor: a comma-separated string of numbers, or an
        already-parsed value (e.g. a tuple) which is returned as-is.
    :return: the first capped float when given a string, else the input.
    """
    # Maximum cut off for Bayes factor value (caps float('inf') parses).
    max_bf = 1e12
    if isinstance(bayes_factor, str):  # idiomatic check, not type(...) ==
        parts = bayes_factor.split(",")
        capped = tuple(min(float(part), max_bf) for part in parts)
        # NOTE(review): only the first element is kept, mirroring the
        # original behaviour — confirm callers expect a scalar here.
        bayes_factor = capped[0]
    return bayes_factor
def get_vggish_poolings_from_features(n_mels=96, n_frames=1360):
    """Get the pooling sizes for the standard VGG-based audio-tagging model.

    Code from:
    https://github.com/keunwoochoi/transfer_learning_music/blob/master/models_transfer.py

    BUG FIX: the original had the ``n_mels >= 18`` branch duplicated; the
    second copy was unreachable and has been removed.

    Todo:
        - This code is ugly, reorganise in a config file;
        - This method assumes 1360 frames cover 30s of audio.

    :param n_mels: number of mel bands in the input features.
    :param n_frames: number of time frames; pooling widths are scaled by
        ``n_frames / 1360``.
    :return: list of five (freq_pool, time_pool) tuples.
    """
    if n_mels >= 256:
        poolings = [(2, 4), (4, 4), (4, 5), (2, 4), (4, 4)]
    elif n_mels >= 128:
        poolings = [(2, 4), (4, 4), (2, 5), (2, 4), (4, 4)]
    elif n_mels >= 96:
        poolings = [(2, 4), (3, 4), (2, 5), (2, 4), (4, 4)]
    elif n_mels >= 72:
        poolings = [(2, 4), (3, 4), (2, 5), (2, 4), (3, 4)]
    elif n_mels >= 64:
        poolings = [(2, 4), (2, 4), (2, 5), (2, 4), (4, 4)]
    elif n_mels >= 48:
        poolings = [(2, 4), (2, 4), (2, 5), (2, 4), (3, 4)]
    elif n_mels >= 32:
        poolings = [(2, 4), (2, 4), (2, 5), (2, 4), (2, 4)]
    elif n_mels >= 24:
        poolings = [(2, 4), (2, 4), (2, 5), (3, 4), (1, 4)]
    elif n_mels >= 18:
        poolings = [(2, 4), (1, 4), (3, 5), (1, 4), (3, 4)]
    elif n_mels >= 16:
        poolings = [(2, 4), (2, 4), (2, 5), (2, 4), (1, 4)]
    elif n_mels >= 12:
        poolings = [(2, 4), (1, 4), (2, 5), (3, 4), (1, 4)]
    elif n_mels >= 8:
        poolings = [(2, 4), (1, 4), (2, 5), (2, 4), (1, 4)]
    elif n_mels >= 6:
        poolings = [(2, 4), (1, 4), (3, 5), (1, 4), (1, 4)]
    elif n_mels >= 4:
        poolings = [(2, 4), (1, 4), (2, 5), (1, 4), (1, 4)]
    elif n_mels >= 2:
        poolings = [(2, 4), (1, 4), (1, 5), (1, 4), (1, 4)]
    else:  # n_mels == 1
        poolings = [(1, 4), (1, 4), (1, 5), (1, 4), (1, 4)]
    ratio = n_frames / 1360  # as these measures are referred to this unit
    return [(pool_w, round(pool_l * ratio)) for pool_w, pool_l in poolings]
def deleteAnnotations(paths, storageIds):
    """Remove stored annotations from the sqlth_annotations table (stub).

    Requires the full tag path (including history provider) for each
    annotation plus each annotation's storage ID (see
    system.tag.queryAnnotations).

    Args:
        paths (list[str]): Tag paths with existing annotations, including
            the source provider, e.g.
            "[HistoryProvider/Gateway:Provider]Path/To/Tag".
        storageIds (list[int]): Storage identifiers of the annotations to
            delete (returned by storeAnnotations / queryAnnotations).

    Returns:
        list[QualifiedValue]: In the real implementation, one qualified
        value per deletion; this mock simply echoes its arguments and
        returns None.
    """
    # Mock implementation: log the call and report nothing.
    print(paths, storageIds)
    return None
def convert_aux_to_base(new_aux: float, close: float):
    """Convert an aux-coin amount into the base coin.

    Parameters
    ----------
    new_aux, the aux amount held by the backtest
    close, the closing price of the coin

    Returns
    -------
    float, new_aux multiplied by the closing price, rounded to 8 places,
    or 0.0 when new_aux is zero/falsy.  (The original docstring said
    "divided"; the code multiplies — documented here as it behaves.)
    """
    if not new_aux:
        return 0.0
    return round(new_aux * close, 8)
def get_parent_child_from_xpath(name):
    """Split an XPath into (parent, child); parent is None for a bare name."""
    if '/' not in name:
        return (None, name)
    parent, child = name.rsplit('/', 1)
    return (parent, child)
def replace_rating(x):
    """Map a textual proficiency rating to an int (0, 1 or 2).

    BUG FIX: the original conditions ``"B2" and "C1" in str(x)`` and
    ``"B1" and not "C1" in str(x)`` only ever tested the "C1" membership
    (a non-empty string literal is always truthy); both substrings are now
    tested explicitly.

    :param x: any value; its str() form is scanned for rating keywords.
    :return: 0 (elementary), 1 (intermediate) or 2 (advanced); 0 by default.
    """
    text = str(x)
    if "Elementary" in text:
        return 0
    if "Intermediate" in text:
        return 1
    if "Advanced" in text:
        return 2
    if "B2" in text and "C1" in text:
        return 1
    if "B1" in text and "C1" not in text:
        return 0
    return 0
def GetAdminKeyName(email):
    """Returns a str used to uniquely identify an administrator.

    Args:
        email: str user email.

    Returns:
        A str that can be used to uniquely identify a given administrator
        ("Admin_" plus the lower-cased email).
    """
    return 'Admin_{}'.format(email.lower())
def _auth_update(old_dict, new_dict): """Like dict.update, except handling the nested dict called auth.""" for (k, v) in new_dict.items(): if k == 'auth': if k in old_dict: old_dict[k].update(v) else: old_dict[k] = v.copy() else: old_dict[k] = v return old_dict
def get_filename(url: str) -> str:
    """Return only the filename portion of a URL.

    Args:
        url (str): The url to a file location on the website.

    Returns:
        str: Everything after the last '/' (the whole string if none).
    """
    return url.rsplit("/", 1)[-1]
def apply_rect_mask_to_layer(source, alpha):
    """Blend one greyscale pixel against a black (0) background.

    Same as apply_mask_to_layer(), but for single pixels and always with a
    black destination, since it's for greyscale masks.

    :param source: source pixel value (0-255).
    :param alpha: mask alpha (0 = transparent, 255 = opaque).
    :return: blended pixel as an int.
    """
    if alpha == 0:
        return 0        # fully transparent: background only
    if alpha == 255:
        return source   # fully opaque: source untouched
    # Partial alpha: scale the source (background contributes 0).
    return int(source * (alpha / 255.0))
def find_longest_span(text_spans):
    """Find the longest match, or None when there are no matches.

    BUG FIX: the original returned ``text_spans[0]`` unconditionally,
    leaving the length-sorting as dead code, so the longest span was never
    actually selected.

    Args:
        text_spans ([TextSpan]): the set of matches we are filtering
    """
    if not text_spans:
        return None
    # max() keeps the first of equally-long spans, matching a stable
    # descending sort followed by [0].
    return max(text_spans, key=len)
def _check_option(parameter, value, allowed_values, extra=''): """Check the value of a parameter against a list of valid options. Return the value if it is valid, otherwise raise a ValueError with a readable error message. Parameters ---------- parameter : str The name of the parameter to check. This is used in the error message. value : any type The value of the parameter to check. allowed_values : list The list of allowed values for the parameter. extra : str Extra string to append to the invalid value sentence, e.g. "when using ico mode". Raises ------ ValueError When the value of the parameter is not one of the valid options. Returns ------- value : any type The value if it is valid. """ if value in allowed_values: return value # Prepare a nice error message for the user extra = ' ' + extra if extra else extra msg = ("Invalid value for the '{parameter}' parameter{extra}. " '{options}, but got {value!r} instead.') allowed_values = list(allowed_values) # e.g., if a dict was given if len(allowed_values) == 1: options = f'The only allowed value is {repr(allowed_values[0])}' else: options = 'Allowed values are ' if len(allowed_values) == 2: options += ' and '.join(repr(v) for v in allowed_values) else: options += ', '.join(repr(v) for v in allowed_values[:-1]) options += f', and {repr(allowed_values[-1])}' raise ValueError(msg.format(parameter=parameter, options=options, value=value, extra=extra))
def make_matrix(num_rows, num_cols, entry_fn):
    """Build a num_rows x num_cols matrix whose (row, col) entry is
    entry_fn(row, col)."""
    return [[entry_fn(row, col) for col in range(num_cols)]
            for row in range(num_rows)]
def gatvc_to_gatcv(gatvc):
    """Swap the last two parts of a 5-part Maven coordinate.

    :param gatvc: groupId:artifactId:type:version:classifier, where the
        classifier is not mandatory.
    :return: groupId:artifactId:type:classifier:version when all five
        parts are present; otherwise the input unchanged.
    """
    if not gatvc or gatvc.count(":") != 4:
        return gatvc
    group, artifact, kind, version, classifier = gatvc.split(":")
    return ":".join((group, artifact, kind, classifier, version))
def whitener(buf, coef):
    """Whiten and dewhiten data according to the given coefficient.

    Each byte is XOR-ed bit-by-bit with a mask stream derived from *coef*
    (feedback constant 0x88); applying the function twice with the same
    coefficient restores the original buffer.

    :param buf: bytes-like input.
    :param coef: initial whitening coefficient (mutated locally).
    :return: bytearray of the transformed bytes.
    """
    out = bytearray(buf)
    for pos in range(len(out)):
        current = out[pos]
        mask = 1
        for _ in range(8):
            if coef & 1:
                coef ^= 0x88
                current ^= mask
            mask <<= 1
            coef >>= 1
        out[pos] = current
    return out
def Overlaps(j, k):
    """Return True when the two intervals overlap (inclusive endpoints).

    Inputs j and k are tuples/lists with a start and an end coordinate as
    in (start, end); order within each pair does not matter and values may
    be int-convertible strings.
    """
    first = sorted(int(v) for v in j)
    second = sorted(int(v) for v in k)
    low, high = sorted((first, second), key=lambda iv: iv[0])
    return low[1] >= high[0]
def fill_names(vals, spws, default='spw'):
    """Create a name per spectral window from a base name.

    The base is vals[0] when available, otherwise *default*; each name is
    the base followed by the first element of the corresponding spw entry.
    """
    base = vals[0] if vals else default
    return ['%s%s' % (base, spw[0]) for spw in spws]
def no_start_menu_music(on=0):
    """Build .reg text toggling the "Minhas Musicas" Start-menu entry.

    COMPATIBILITY: Windows 2000/Me/XP.  Sets NoStartMenuMyMusic under
    HKCU Policies\\Explorer — dword 1 removes the option, 0 re-enables it.

    :param on: truthy to remove the option, falsy (default) to restore it.
    :return: registry-file text with the appropriate dword value.
    """
    key = ('[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\'
           'CurrentVersion\\Policies\\Explorer]\n')
    value = '00000001' if on else '00000000'
    return key + '"NoStartMenuMyMusic"=dword:' + value
def sanitize_doc_id(doc_id):
    """Replace '/' with ':' in an Elasticsearch document id.

    Not strictly required, but slashes are awkward in ids.
    """
    return doc_id.replace('/', ':')
def _vec_scalar(vector, scalar): """Multiply a vector by an scalar.""" return [v * scalar for v in vector]
def _strip(text): """Normalize expected strings to allow more readable definition.""" return text.lstrip('\n').rstrip(' ')
def get_op_list(arch):
    """Extract the operation names from an architecture string.

    code modified from project https://github.com/naszilla/naszilla

    Tokens are separated by '|'; positions 0, 2, 5 and 9 are structural
    separators and skipped, and each remaining token of the form "op~k"
    contributes "op".
    """
    skip_positions = {0, 2, 5, 9}
    tokens = arch.split('|')
    return [tok.split('~')[0]
            for pos, tok in enumerate(tokens)
            if pos not in skip_positions]
def custom404handler(err):
    """Custom handler for 404 errors: expose the error to the template."""
    return {'err': err}
def get_pytest_marks_on_function(f):
    """Return *ALL* pytest marks (not only parametrization) applied to *f*.

    :param f: the function to inspect.
    :return: list of marks; empty list when none can be found.
    """
    try:
        return f.pytestmark
    except AttributeError:
        pass
    # pytest < 3 fallback: marks are set as plain attributes on the
    # function object with no particular type; detect them by repr prefix.
    try:
        attributes = vars(f)
    except AttributeError:
        return []
    return [value for value in attributes.values()
            if str(value).startswith("<MarkInfo '")]
def factorial(input_number: int) -> int:
    """
    Calculate the factorial of specified number

    >>> factorial(1)
    1
    >>> factorial(6)
    720
    >>> factorial(0)
    1
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: factorial() not defined for negative values
    >>> factorial(0.1)
    Traceback (most recent call last):
        ...
    ValueError: factorial() only accepts integral values
    """
    if input_number < 0:
        raise ValueError("factorial() not defined for negative values")
    if not isinstance(input_number, int):
        raise ValueError("factorial() only accepts integral values")
    result = 1
    for factor in range(2, input_number + 1):
        result *= factor
    return result
def trim_padding(data, len):
    """Drop the leading padding from *data* and return the remainder.

    The second parameter deliberately keeps its original builtin-shadowing
    name ``len`` so keyword callers keep working.

    :param data: the buffer to trim.
    :param len: number of leading positions to skip (RC5-style), or None
        to skip everything up to and including the first NUL (RSA-style).
    :return: the buffer contents after the padding.
    """
    if len is None:
        # from RSA: find first null and return what follows it.
        # NOTE(review): chr(0x00) only matches str buffers — bytes input
        # would need b"\x00"; confirm callers pass str here.
        return data[data.find(chr(0x00)) + 1:]
    # from RC5: fixed-length prefix.
    return data[len:]
def get_youtube_url(data: dict) -> str:
    """Return the YouTube URL from YoutubeDL's result data.

    e.g. https://www.youtube.com/watch?v=dQw4w9WgXcQ — taken from the
    first entry's 'webpage_url' field.
    """
    first_entry = data['entries'][0]
    return first_entry['webpage_url']
def get_box_coord(flare_x, flare_y, box_size):
    """Return the four corners of a box centred on a flare location.

    Param:
        flare_x: integer flare location x
        flare_y: integer flare location y
        box_size: length of the sides of the box
    return:
        list of (x, y) tuples for the box vertices in the order
        (top left, top right, bottom left, bottom right).
    """
    half = 0.5 * box_size
    return [(flare_x - half, flare_y + half),
            (flare_x + half, flare_y + half),
            (flare_x - half, flare_y - half),
            (flare_x + half, flare_y - half)]
def _get_new_user_identities_for_remove(exist_user_identity_dict, user_identity_list_to_remove): """ :param exist_user_identity_dict: A dict from user-assigned managed identity resource id to identity objecct. :param user_identity_list_to_remove: None, an empty list or a list of string of user-assigned managed identity resource id to remove. :return A list of string of user-assigned managed identity resource ID. """ if not exist_user_identity_dict: return [] # None if user_identity_list_to_remove is None: return list(exist_user_identity_dict.keys()) # Empty list means remove all user-assigned managed identities if len(user_identity_list_to_remove) == 0: return [] # Non-empty list new_identities = [] for id in exist_user_identity_dict.keys(): if not id.lower() in user_identity_list_to_remove: new_identities.append(id) return new_identities
def create_error_response(code, jrpc_id, message):
    """Build a JSON-RPC 2.0 error response dict.

    Parameters:
        - code: error code enum which corresponds to the error response
        - jrpc_id: JRPC id of the error response
        - message: error message which corresponds to the error response
    """
    return {
        "jsonrpc": "2.0",
        "id": jrpc_id,
        "error": {
            "code": code,
            "message": message,
        },
    }
def get_metric_name_from_task(task: str) -> str:
    """Get the metric name for the corresponding GLUE task.

    If using `load_best_model_at_end=True` in TrainingArguments then you
    need `metric_for_best_model=metric_name`; use this helper to obtain it.
    Defaults to "accuracy" for tasks without a special metric.
    """
    special_metrics = {
        "stsb": "pearson",
        "cola": "matthews_correlation",
    }
    return special_metrics.get(task, "accuracy")
def col(matlist, i):
    """
    Returns the ith column of a matrix as a list of single-element rows.

    Note: Currently very expensive (transposes the whole matrix).

    Examples
    ========

    >>> from sympy.matrices.densetools import col
    >>> from sympy import ZZ
    >>> a = [
    ... [ZZ(3), ZZ(7), ZZ(4)],
    ... [ZZ(2), ZZ(4), ZZ(5)],
    ... [ZZ(6), ZZ(2), ZZ(3)]]
    >>> col(a, 1)
    [[7], [4], [2]]
    """
    transposed = [list(row) for row in zip(*matlist)]
    return [[entry] for entry in transposed[i]]
def get_four_corners_from_2_corners(x1, y1, x2, y2):
    """Return all four corners of a bounding box given two opposite corners.

    Args:
        x1, y1, x2, y2 (int) - top left and bottom right corners of the box.

    Returns a flat list [x1, y1, x1, y2, x2, y2, x2, y1] covering all
    four corner coordinates.
    """
    corners = [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]
    return [coordinate for corner in corners for coordinate in corner]
def coord(x_coordinate = 0, y_coordinate = 0):
    """Form a "(x,y)" coordinate string from x and y integers."""
    return f'({x_coordinate},{y_coordinate})'
def parse_user_prefix(prefix):
    """
    Parses an IRC prefix:
        prefix = nickname [ [ "!" user ] "@" host ]

    Returns:
        triple (nick, user, host); user and host may be None.
    """
    rest, at_sep, host_part = prefix.partition('@')
    host = host_part if at_sep else None
    nick, bang_sep, user_part = rest.partition('!')
    user = user_part if bang_sep else None
    return nick, user, host
def fahrenheit_to_celsius(fahrenheit: float) -> float:
    """
    Convert a given value from Fahrenheit to Celsius and round it to 2
    decimal places.

    >>> fahrenheit_to_celsius(0)
    -17.78
    >>> fahrenheit_to_celsius(20.0)
    -6.67
    >>> fahrenheit_to_celsius(40.0)
    4.44
    >>> fahrenheit_to_celsius(60)
    15.56
    >>> fahrenheit_to_celsius(80)
    26.67
    >>> fahrenheit_to_celsius("100")
    37.78
    >>> fahrenheit_to_celsius("fahrenheit")
    Traceback (most recent call last):
        ...
    ValueError: could not convert string to float: 'fahrenheit'
    """
    celsius = (float(fahrenheit) - 32) * 5 / 9
    return round(celsius, 2)
def resolve_profile_alias(profile_name=None, region_name=None):
    """Wrapper around boto3.Session that behaves better with `~/.aws/config`

    Will follow the source_profile of `profile_name` in `~/.aws/config` if
    found and the only keys it has are `source_profile` and `region`. More
    complicated configurations are not supported for the workaround
    behavior, and this function reverts to the original behavior of
    `boto3.Session`. If `profile_name` is not found in `~/.aws/config`,
    this function reverts to the original behavior of `boto3.Session`.

    Note that this function is necessary because `boto3.Session` will
    ignore aliases for some complicated configurations of `source_profile`.

    Returns the resolved (profile_name, region_name) pair.
    """
    import configparser
    from pathlib import Path
    config = configparser.ConfigParser()
    config.read(Path('~/.aws/config').expanduser())
    # Follow at most 5 levels of aliasing to avoid infinite alias cycles.
    cnt = 0
    while cnt < 5:
        cnt += 1
        if profile_name:
            try:
                profile_config = config['profile ' + profile_name]
            except KeyError:
                # Profile not in the config file: keep the name as given.
                break
            else:
                # Only the simple alias shape {source_profile, region} is
                # followed; anything richer is left for boto3 to handle.
                if set(profile_config.keys()) <= {'source_profile', 'region'}:
                    new_profile_name = profile_config.get('source_profile', profile_name)
                    if region_name is None:
                        # Inherit the alias's region when the caller gave none.
                        region_name = profile_config.get('region', region_name)
                    if new_profile_name == profile_name:
                        # Alias resolves to itself: resolution finished.
                        break
                    else:
                        profile_name = new_profile_name
                else:
                    break
        else:
            break
    return profile_name, region_name
def get_keys(d, *keys):
    """Return a new dict containing only the given keys of *d*.

    Keys absent from the original dict are simply absent from the result.
    """
    wanted = set(keys)
    return {key: value for key, value in d.items() if key in wanted}
def predict_pecies(sepal_width=None, petal_length=None, petal_width=None):
    """ Predictor for species from model/52952081035d07727e01d836

        Predictive model by BigML - Machine Learning Made Easy

        Exported decision tree over the classic iris features; None for a
        feature means "missing" and takes the branch's fallback label.
        (Function name keeps the original "pecies" typo for compatibility.)
    """
    # Each nested pair of complementary comparisons encodes one tree split;
    # the thresholds come from the exported BigML model and must not change.
    if (petal_width is None):
        return u'Iris-virginica'
    if (petal_width > 0.8):
        if (petal_width <= 1.75):
            if (petal_length is None):
                return u'Iris-versicolor'
            if (petal_length > 4.95):
                if (petal_width <= 1.55):
                    return u'Iris-virginica'
                if (petal_width > 1.55):
                    if (petal_length > 5.45):
                        return u'Iris-virginica'
                    if (petal_length <= 5.45):
                        return u'Iris-versicolor'
            if (petal_length <= 4.95):
                if (petal_width <= 1.65):
                    return u'Iris-versicolor'
                if (petal_width > 1.65):
                    return u'Iris-virginica'
        if (petal_width > 1.75):
            if (petal_length is None):
                return u'Iris-virginica'
            if (petal_length > 4.85):
                return u'Iris-virginica'
            if (petal_length <= 4.85):
                if (sepal_width is None):
                    return u'Iris-virginica'
                if (sepal_width <= 3.1):
                    return u'Iris-virginica'
                if (sepal_width > 3.1):
                    return u'Iris-versicolor'
    if (petal_width <= 0.8):
        return u'Iris-setosa'
def countObjects(skel):
    """Number of objects required for exporting the specified skeleton.

    Two objects per bone plus one, or 0 when no skeleton is given.
    """
    if not skel:
        return 0
    return 2 * skel.getBoneCount() + 1
def check_coordinating_cjc(sw, idx, edge, c_cjc, e_cjc, n_cjc):
    """ Check out coordinating conjunctions

    Returns 1 when every conjunction-related consistency check passes on
    the edge set, 0 as soon as one fails.

    NOTE(review): inferred from usage — ``edge`` looks like a list of
    (head, dependent) index pairs, ``sw`` a token list and ``idx`` a map
    from node index to token positions; confirm against the caller.
    """
    # Coordinated heads (c_cjc): node cj and cj+1 must share the same
    # dependents, compared as ordered lists.
    if c_cjc != []:
        for cj in c_cjc:
            if [e[-1] for e in edge if e[0] == cj] != [e[-1] for e in edge if e[0] == cj+1]:
                return 0
    # Coordinated dependents (e_cjc): node ej and ej+1 must share heads.
    if e_cjc != []:
        for ej in e_cjc:
            if [e[0] for e in edge if e[1] == ej] != [e[0] for e in edge if e[1] == ej+1]:
                return 0
    # Non-coordinated pairs (n_cjc): nj and nj+1 must NOT share an edge
    # endpoint in either direction.
    if n_cjc != []:
        for nj in n_cjc:
            for e in [e[-1] for e in edge if e[0] == nj]:
                if (nj+1, e) in edge:
                    return 0
            for e in [e[0] for e in edge if e[-1] == nj]:
                if (e, nj+1) in edge:
                    return 0
    conjunction = [',', 'and', 'or', 'also', ';', 'as well as', 'comparable with', 'either', 'plus']
    # Any two edges sharing a head (or a dependent) must have at least one
    # conjunction token somewhere in the text span between the two other
    # endpoints; if none of the known conjunctions appears, reject.
    for e1 in edge:
        for e2 in edge:
            if e1 != e2:
                if e1[0] == e2[0]:
                    count = 0
                    for cjc in conjunction:
                        if cjc not in ' '.join(sw[max(idx[min(e1[1], e2[1])])+1:min(idx[max(e1[1], e2[1])])]):
                            count += 1
                    if count == len(conjunction):
                        return 0
                if e1[1] == e2[1]:
                    count = 0
                    for cjc in conjunction:
                        if cjc not in ' '.join(sw[max(idx[min(e1[0], e2[0])])+1:min(idx[max(e1[0], e2[0])])]):
                            count += 1
                    if count == len(conjunction):
                        return 0
    return 1
def find_first(predicate, iterable):
    """Return the first element of *iterable* satisfying *predicate*, or None.

    A predicate of None selects the first truthy element (filter semantics).
    """
    return next(filter(predicate, iterable), None)
def str2hex(number):
    """Convert a hex-based string number to an int.

    :param string number: string hex number to convert
    :return int: integer value of the hex number
    """
    return int(number, base=16)
def format_version(string):
    """Format version input from e.g. "1.1" to "01.01.00".

    Each dotted component is zero-padded to two digits; missing components
    are filled with "00", and only the first three are kept.
    """
    parts = [part.zfill(2) for part in string.split(".")]
    parts += ["00", "00"]
    return ".".join(parts[:3])
def subtraction(x, y):
    """Subtract y from x, asserting both are numeric.

    >>> subtraction(10, 5)
    5
    >>> subtraction('10', 5)
    Traceback (most recent call last):
        ...
    AssertionError: x needs to be an integer or float
    """
    assert isinstance(x, (int, float)), 'x needs to be an integer or float'
    assert isinstance(y, (int, float)), 'y needs to be an integer or float'
    return x - y
def ifexpr(fpredicate, ftrue, ffalse, arg):
    """Functional if expression.

    Args:
        fpredicate: true/false function on arg
        ftrue: run if fpredicate is true with arg
        ffalse: run if fpredicate is false with arg
        arg: the arg to run on the functions

    Returns:
        the result of either ftrue or ffalse
    """
    return ftrue(arg) if fpredicate(arg) else ffalse(arg)
def hello(name=None):
    """Greet *name* with a capitalised first letter (Xxxx).

    Args:
        name (str): A person's name.

    Returns:
        str: "Hello, Name!" for a given name, or "Hello, World!" when the
        name is missing or an empty string.
    """
    if not name:
        return "Hello, World!"
    return "Hello, {}!".format(name.title())
def contains(text, pattern):
    """Return a boolean indicating whether pattern occurs in text.

    BUG FIXES: on a partial match the original restarted the scan at the
    mismatch offset (text[i:]) instead of one character past the current
    start (text[1:]), so overlapping candidates were skipped — e.g.
    contains('aaab', 'aab') wrongly returned False.  The pattern assert
    also formatted *text* into its message instead of *pattern*.

    Runtime, worst case: O(n*m) comparisons, n = len(text), m = len(pattern)
    Runtime, best case: O(1), if text == ''
    Space Complexity: O(n) recursion depth
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    if text == '':
        return False
    if pattern == '':
        return True
    # Match at the current position, else slide the window by exactly one.
    if text[:len(pattern)] == pattern:
        return True
    return contains(text[1:], pattern)
def ddvv(xyz1, xyz2):
    """Distance squared between two 3-component vectors."""
    diffs = [xyz1[axis] - xyz2[axis] for axis in range(3)]
    return diffs[0] * diffs[0] + diffs[1] * diffs[1] + diffs[2] * diffs[2]
def get_matching_connections(cxs_1, cxs_2):
    """Return the connections shared (by innovation number) between two sets.

    Gives (a, b): connections in cxs_1 whose innovation appears in cxs_2,
    and vice versa, each sorted by innovation number.
    """
    innovations_2 = {c.innovation for c in cxs_2}
    innovations_1 = {c.innovation for c in cxs_1}
    shared_1 = [c for c in cxs_1 if c.innovation in innovations_2]
    shared_2 = [c for c in cxs_2 if c.innovation in innovations_1]
    return (sorted(shared_1, key=lambda c: c.innovation),
            sorted(shared_2, key=lambda c: c.innovation))
def concat_relations(relations, offsets):
    """Combine relation dictionaries from multiple datasets.

    Args:
        relations (list): list of relation dicts to combine; keys are
            (id1, id2) pairs.
        offsets (list): offset to add to both indices of each dict's keys.

    Returns:
        new_relations (dict): dictionary of combined relations.
    """
    merged = {}
    for relation, offset in zip(relations, offsets):
        for (id1, id2), value in relation.items():
            merged[(id1 + offset, id2 + offset)] = value
    return merged
def getJ1939ProtocolString(protocol = 1, Baud = "Auto", Channel = -1, SampleLocation = 95, SJW = 1,
                           PROP_SEG = 1, PHASE_SEG1 = 2, PHASE_SEG2 = 1, TSEG1 = 2, TSEG2 = 1,
                           SampleTimes = 1) -> bytes:
    """Generate the fpchProtocol string for the ClientConnect function.

    The default values above were made up on the spot and shouldn't
    necessarily be used.  IDSize is automatically set to 29 whenever
    relevant because that is its only valid value.  A Channel argument of
    -1 omits the channel clause.

    Examples:
        - protocol1 = J1939.getProtocolString(protocol = 1, Baud = "Auto")
        - protocol2 = J1939.getProtocolString(protocol = 3, Baud = 500, SampleLocation = 75, SJW = 3, IDSize = 29)
    """
    chan_arg = f",Channel={str(Channel)}" if Channel != -1 else ""
    if protocol == 1:
        body = f"J1939:Baud={str(Baud)}"
    elif protocol == 2:
        body = "J1939"
    elif protocol == 3:
        body = f"J1939:Baud={str(Baud)},SampleLocation={str(SampleLocation)},SJW={str(SJW)},IDSize=29"
    elif protocol == 4:
        body = (f"J1939:Baud={str(Baud)},PROP_SEG={str(PROP_SEG)},PHASE_SEG1={str(PHASE_SEG1)},"
                f"PHASE_SEG2={str(PHASE_SEG2)},SJW={str(SJW)},IDSize=29")
    elif protocol == 5:
        body = (f"J1939:Baud={str(Baud)},TSEG1={str(TSEG1)},TSEG2={str(TSEG2)},"
                f"SampleTimes={str(SampleTimes)},SJW={str(SJW)},IDSize=29")
    else:
        # Unknown protocol number: bare identifier, no channel clause.
        return b"J1939"
    return bytes(body + chan_arg, 'utf-8')
def add_metric_suffix(num: int):
    """Add the classic (b, m, k) suffixes to the end of a number.

    Whole multiples drop the decimal ('2k'); others keep one decimal
    place ('1.5k').

    BUG FIX: the whole-number test was written as ``1 % x == 0`` (only
    true for x == 1); it is now ``x % 1 == 0`` so e.g. 2000 -> '2k'.

    :param num: The number to add the suffix to
    """
    for threshold, suffix in ((1000000000, 'b'), (1000000, 'm'), (1000, 'k')):
        if num >= threshold:
            scaled = num / threshold
            display = int(scaled) if scaled % 1 == 0 else round(scaled, 1)
            return '{:,}{}'.format(display, suffix)
    return '{:,}'.format(int(num))
def all_tasks(loop=None):
    """Return a set of all tasks for the loop.

    BUG FIX: the stub returned ``{}``, which is an empty *dict*; the
    documented contract is a set, so return an empty ``set()`` instead.

    :param loop: event loop (ignored — we could track tasks, but we will not).
    """
    return set()
def is_integer(string):
    """Check whether *string* can be converted to an integer.

    Args:
        string (str): The string to check.

    Returns:
        Boolean: True when int(string) succeeds, False on ValueError.
        (TypeError from non-string input is deliberately not caught.)
    """
    try:
        int(string)
    except ValueError:
        return False
    return True
def upstream_authority(authority_and_code):
    """Authority of the upstream API used for validating APIcast requests.

    Takes an (authority, code) pair and returns the authority part.
    """
    return authority_and_code[0]
def expand_sided_value(value):
    """Expand a CSS-style sided value into a (top, right, bottom, left) 4-tuple.

    Possible inputs:
        style                        /* One-value syntax   */ E.g. 1em;
        vertical horizontal          /* Two-value syntax   */ E.g. 5% auto;
        top horizontal bottom        /* Three-value syntax */ E.g. 1em auto 2em;
        top right bottom left        /* Four-value syntax  */ E.g. 2px 1em 0 auto;
    """
    if not isinstance(value, (tuple, list)):
        return (value,) * 4
    count = len(value)
    if count == 1:
        return tuple(value) * 4
    if count == 2:
        return tuple(value) * 2  # (vertical, horizontal) repeated
    if count == 3:
        top, horizontal, bottom = value
        return (top, horizontal, bottom, horizontal)
    if count == 4:
        return tuple(value)
    raise ValueError('Invalid collapsed four-sided value: %r' % value)
def no_op(event, context):
    """Don't do anything; just report success.

    Args:
        event (dict): Event data passed to the handler.
        context (object): Runtime information.
    """
    return True
def binarySearch(match_item, itemList):
    """Classic binary search over a sorted list.

    match_item and itemList's data should always be on their normalized
    form, since elements are compared directly.

    :return: index of match_item in itemList, or -1 when absent.
    """
    lo, hi = 0, len(itemList) - 1
    while lo <= hi:
        middle = (lo + hi) // 2  # no overflow concern with Python ints
        candidate = itemList[middle]
        if candidate == match_item:
            return middle
        if candidate > match_item:
            hi = middle - 1
        else:
            lo = middle + 1
    return -1
def leap_year(y):
    """
    @purpose Return whether the year is a leap year.
    @param y: the year to test for leap year
    @postcondition Returns 1 for a Gregorian leap year, 0 for a common
        year, and -1 for years before 1582 (pre-Gregorian calendar).
    """
    if y < 1582:
        return -1  # Gregorian rules not applicable before 1582
    is_leap = (y % 4 == 0 and y % 100 != 0) or y % 400 == 0
    return 1 if is_leap else 0
def read_kfold_config(split: dict):
    """KFold parameter reader with defaults.

    Ensures that the KFold splitting parameters are defined, printing a
    notice and falling back to a default for each missing one.

    :param dict split: parameters of the KFold splitting method.
    :return:
        - n_fold - number of folds used for cross-validation (default 5).
        - shuffle - whether to shuffle before splitting (default True).
        - random_state - seed for reproducibility (default 1).
    """
    try:
        n_fold = split["fold_nr"]  # number of folds
    except Exception as e:
        print(f"fold_nr is not provided: {e}")
        n_fold = 5
    try:
        shuffle = split["shuffle"]
    except Exception as e:
        print(f"shuffle is not provided: {e}")
        shuffle = True
    try:
        random_state = split["random_state"]
    except Exception as e:
        print(f"random_state is not provided: {e}")
        random_state = 1
    return n_fold, shuffle, random_state
def indent_new_lines(text: str, num: int = 4):
    """Insert *num* spaces at the beginning of each new line (not the first)."""
    indent = " " * num
    return text.replace("\n", "\n" + indent)
def mapvalue(value, leftMin, leftMax, rightMin=0.0, rightMax=1.0):
    """Map *value* linearly from [leftMin, leftMax] to [rightMin, rightMax].

    NOTE(review): raises ZeroDivisionError when leftMin == leftMax — the
    caller is assumed to pass a non-degenerate input range.
    """
    span_in = leftMax - leftMin
    span_out = rightMax - rightMin
    # Normalise into 0-1, then scale into the target range.
    normalized = float(value - leftMin) / float(span_in)
    return rightMin + (normalized * span_out)
def robustmin(data):
    """Like min() but returns None for an empty (or falsy) input."""
    return min(data) if data else None
def build_image_name(config, os_type, os_version):
    """build_image_name

    Returns a standard docker image name based upon package parameters:
    "<os_type><os_version>-<project>-builder".
    """
    return f"{os_type}{os_version}-{config['project']}-builder"
def create_ogblob(ogheader):
    """create <meta> tags from Open Graph dict

    if ogheader is an empty dict, then return an empty string.
    if ogheader has some values but not the minimum, then raise ValueError.
    if og:image is a path, not a URL of http or https scheme, then
    'https://help.rerobots.net/' is prepended to it.

    NOTE(review): mutates ogheader['image'] in place when prepending.
    """
    if len(ogheader) == 0:
        return ''
    # All three of title/url/image are required once any metadata is given.
    if not ('title' in ogheader and 'url' in ogheader and 'image' in ogheader):
        raise ValueError('some but not all required metadata provided for Open Graph')
    if not (ogheader['image'].startswith('http://') or ogheader['image'].startswith('https://')):
        # Relative image paths are resolved against the help site root.
        ogheader['image'] = 'https://help.rerobots.net/' + ogheader['image']
    # NOTE(review): the template's internal whitespace was lost when this
    # file was flattened; the layout below is a best-effort reconstruction.
    blob = """
<meta property="og:type" content="website" />
<meta property="og:url" content="{URL}" />
<meta property="og:title" content="{TITLE}" />
<meta property="og:image" content="{IMAGE}" />
""".format(URL=ogheader['url'], TITLE=ogheader['title'], IMAGE=ogheader['image'])
    return blob
def suffix_eq(text, pos, expected):
    """Check whether text[pos:] equals *expected* without allocating a slice.

    Effectively ``text[pos:] == expected`` but avoids building a huge
    substring.  With ``pos`` 0 (or falsy) it is plain string equality.

    :param str text: Text to compare.
    :param int pos: Starting position for the suffix.
    :param str expected: String to compare to.
    """
    if not pos:
        return text == expected
    if len(text) - pos != len(expected):
        return False
    return all(text[pos + offset] == ch for offset, ch in enumerate(expected))
def rangify(number_list):
    """Collapse a sorted int list into inclusive (start, end) run tuples.

    Assumes the list is sorted; an empty input is returned unchanged.
    """
    if not number_list:
        return number_list
    ranges = []
    start = previous = number_list[0]
    for current in number_list[1:]:
        if current != previous + 1:
            # Gap found: close the current run and open a new one.
            ranges.append((start, previous))
            start = current
        previous = current
    ranges.append((start, previous))
    return ranges
def RPL_VERSION(sender, receipient, message):
    """Reply Code 351.

    NOTE(review): the (misspelled) ``receipient`` parameter is unused but
    kept for signature compatibility with the other reply handlers.
    """
    return "<{}>: {}".format(sender, message)
def config_dict_to_object(config_dict):
    """Recursively convert a dictionary into an attribute-style object.

    Args:
        config_dict: config dictionary to convert.

    Returns:
        A dynamically created ConfigObj type whose attributes mirror the
        dict's keys; nested dicts and lists are converted recursively.
    """
    def convert(node):
        if isinstance(node, dict):
            attributes = {key: convert(val) for key, val in node.items()}
            return type('ConfigObj', (), attributes)
        if isinstance(node, list):
            return [convert(entry) for entry in node]
        return node

    return convert(config_dict)
def merge_dicts(x, y):
    """Merge two dicts into a new one; keys in *y* win on conflict."""
    merged = dict(x)
    merged.update(y)
    return merged
def trim_word_space(words, possible_letters, known_letters):
    """Trim the word space to words consistent with the known constraints.

    Removes words whose letter at any position is outside that position's
    possible set, then removes words missing any known letter.

    NOTE(review): assumes every word is no longer than possible_letters —
    a longer word would raise IndexError; confirm against the caller.
    """
    remaining = words.copy()
    for word in words:
        for position, letter in enumerate(word):
            if letter not in possible_letters[position]:
                remaining.remove(word)
                break
    for required in known_letters:
        for word in remaining.copy():
            if required not in word:
                remaining.remove(word)
    return remaining
def safe_xml_tag_name(
    name: str, numeric_prefix: str = "tag-", empty_fallback: str = "empty-tag"
) -> str:
    """
    Returns a safe xml tag name by replacing invalid characters with a dash.

    :param name: The name that must be converted to a safe xml tag name.
    :param numeric_prefix: An xml tag name can't start with a number, so if that
        is the case, then this value will be prepended.
    :param empty_fallback: An xml tag name can't be empty, so if that is the
        case, then this fallback value will be returned.
    :return: A safe name that can be used as xml tag.
    """
    # Replace every non-alphanumeric character with a dash in one pass.
    tag = "".join(ch if ch.isalnum() else "-" for ch in name)
    # Collapse runs of dashes down to a single dash.
    while "--" in tag:
        tag = tag.replace("--", "-")
    # At most one dash can remain at either edge after collapsing.
    if tag.startswith("-"):
        tag = tag[1:]
    if tag.endswith("-"):
        tag = tag[:-1]
    if tag and tag[0].isnumeric():
        tag = f"{numeric_prefix}{tag}"
    if not tag:
        return empty_fallback
    return tag
def line_text(*fields, **kargs):
    """ Transform a list of values into a tsv line

    :param fields: list of values as parameters
    :param kargs: optional arguments: null_value: value to interpret as None
    :return: tsv line text
    """
    null_value = kargs.get("null_value", "")
    cells = (null_value if field is None else str(field) for field in fields)
    return "\t".join(cells) + "\n"
def bytelist2string(bytelist):
    """ Convert a list of byte values (e.g. [0x10 0x20 0x00]) to a string (e.g. '\x10\x20\x00').
    """
    return "".join(map(chr, bytelist))
def pad_block(block: bytes, block_size: int) -> bytes:
    """
    Pads a plaintext block of bytes to the desired block size using PKCS#7 padding

    :param block: the block to pad
    :param block_size: the desired total size after padding
    :return: The original block padded using PKCS#7 padding. If the block is
        already greater than or equal to the desired block size, then the
        original block parameter is returned unaltered.

    Fix: the previous revision returned a ``bytearray`` on the padding path
    despite the declared ``bytes`` return type; padding is now built with
    bytes arithmetic so the return type is consistent.
    """
    pad = block_size - len(block)
    if pad <= 0:
        # Already at or beyond the desired size — documented passthrough.
        return block
    # PKCS#7: append `pad` copies of the byte value `pad`.
    return bytes(block) + bytes([pad]) * pad
def _get_nodes(x, prefix=""): """ Args: x: a tree where internal nodes are dictionaries, and leaves are lists. prefix: not meant to be passed. The parent prefix of a label. e.g. given A -> B -> C, the parent prefix of C is 'A [sep] B'. sep: the separator to use between labels. Could be 'and', '-', or whatever Returns: All nodes in the hierarchy. Each node is given by a string A [sep] B [sep] C etc. """ res = [] q = [(x, prefix)] while q: x, prefix = q.pop() if isinstance(x, list): res.extend([prefix + k for k in x]) else: for k, v in x.items(): res.append(prefix + k) q.append((v, prefix + k + " - ")) return list(set(res))
def _diff_properties(expected, current): """Calculate the difference between the current and the expected properties * 'expected' is expressed in a dictionary like: {'property': value} * 'current' contains the same format retuned by 'btrfs.properties' If the property is not available, will throw an exception. """ difference = {} for _property, value in expected.items(): current_value = current[_property]["value"] if value is False and current_value == "N/A": needs_update = False elif value != current_value: needs_update = True else: needs_update = False if needs_update: difference[_property] = value return difference
def get_next_xp(level: int) -> int:
    """Returns the xp needed to reach the next level (cubic curve)."""
    nxt = level + 1
    return nxt * nxt * nxt
def make_meters_map(meters, meter):
    """Add meter to meters keyed by its (stringified) ID.

    Note: the 'id' key is removed from the meter dict itself — the ID
    lives only in the map key afterwards. Mutates both arguments.
    """
    meters[str(meter.pop("id"))] = meter
    return meters
def get_rounds(number):
    """
    :param number: int - current round number.
    :return: list - current round and the two that follow.
    """
    return list(range(number, number + 3))
def compile_word(word):
    """Compile a word of uppercase letters as numeric digits.

    E.g., compile_word('YOU') => '(1*U+10*O+100*Y)'
    Non-uppercase words unchanged: compile_word('+') => '+'
    """
    if not word.isupper():
        return word
    # Least-significant letter first, so powers of ten line up with position.
    terms = []
    for power, letter in enumerate(reversed(word)):
        terms.append('%s*%s' % (10 ** power, letter))
    return '(' + '+'.join(terms) + ')'
def target_path(log_file):
    """Return a target location for an RDS log file.

    Given an RDS log file name ('error/postgresql.log.2020-03-03-21'),
    return a 2-tuple ('error/<date>/', 'postgresql.log.2020-03-03-21')
    representing the target path and filename to save the file.
    """
    directory, name = log_file.rsplit('/', 1)
    # Final extension is '<date>-<hour>'; drop the trailing hour component.
    stamp = log_file.rsplit('.', 1)[-1]
    date = stamp.rsplit('-', 1)[0]
    return f'{directory}/{date}/', name
def flatten(x: float, y: float, z: float, scale: int, distance: int) -> tuple:
    """
    Converts 3d point to a 2d drawable point via perspective projection.

    ```python
    >>> flatten(1, 2, 3, 10, 10)
    (7.6923076923076925, 15.384615384615385)
    ```
    """
    # Same operation order as the classic form: (coord*distance)/depth*scale.
    depth = z + distance
    return (x * distance) / depth * scale, (y * distance) / depth * scale
def _split_n_in_m_parts(n, m): """ Split integer n in m integers which sum up to n again, even if m * (n//m) != n. """ a = [n // m] * m a[0] = n - (m - 1) * a[0] return a
def leapYear(year: int = 2020) -> None:
    """Test and `print` whether a given year is a `Leap year` or not.

    Returns `None` if bound to a variable.

    Args:
        year: the year to test, as an `integer (int)`.
    """
    # Gregorian rule: divisible by 4, except centuries, unless divisible by 400.
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        print(f"{year} is a leap year")
    else:
        print(f"{year} is not a leap year")
    return None
def check_lead_zero(to_check):
    """Check if a number has a leading 0.

    Args:
        to_check: The number to be checked (string, or anything str()-able;
            None is treated as empty).

    Returns:
        True if a leading 0 is found or the value is empty/None,
        False otherwise.
    """
    # Fix: the old guard `str(to_check) in (None, '')` could never match None
    # (str() never returns None) and non-string inputs crashed on indexing.
    # Normalise to a string once and test that.
    text = '' if to_check is None else str(to_check)
    if not text:
        return True
    return text[0] == '0'
def detect_clause(parser, clause_name, tokens):
    """Helper function detects a certain clause in tag tokens list.

    If present, compiles the token immediately following the clause name
    with the parser, removes both tokens from the list (in place), and
    returns the compiled value; otherwise returns None.
    """
    if clause_name not in tokens:
        return None
    idx = tokens.index(clause_name)
    clause_value = parser.compile_filter(tokens[idx + 1])
    # Consume both the clause keyword and its argument.
    del tokens[idx:idx + 2]
    return clause_value
def browse(i):
    """Open the crowd-tuning CPU platform pages in the default web browser.

    Input:  { } (unused; kept for the CK module-function calling convention)

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
            }

    Fix: the previous revision imported `webbrowser` twice and scattered
    the imports through the body; they are hoisted to one place.
    """
    import time
    import webbrowser

    # TBD: should calculate url
    github_url = 'https://github.com/ctuning/ck-crowdtuning-platforms/tree/master/platform.cpu'
    webbrowser.open(github_url)
    # Give the first tab a moment to open before launching the second one.
    time.sleep(3)
    repo_url = 'http://cknowledge.org/repo/web.php?action=index&module_uoa=wfe&native_action=show&native_module_uoa=platform.cpu'
    webbrowser.open(repo_url)
    return {'return': 0}
def normalize_id(id):
    """Normalize any ids used in the IOC to make them compatible with CybOX.

    This is just in case the normal UUID type is not used: colons are
    replaced with dashes. None passes through unchanged.
    """
    return None if id is None else id.replace(":", "-")
def is_group_in_sorted(s: str) -> bool:
    """Assuming s is sorted, is there a repeated character?

    Fix: the previous implementation called ``s.count`` for every character
    (O(n^2)). Since the precondition says s is sorted, duplicates must be
    adjacent, so a single linear pass over neighbouring pairs suffices.
    """
    return any(a == b for a, b in zip(s, s[1:]))
def get_submit_attr_str(submit_attrs):
    """Convert submit attributes from a dictionary form to the corresponding
    configuration string.

    Args:
        submit_attrs (dict): the dictionary containing the submit attributes;
            entries whose value is None are skipped.

    Returns:
        string: the string representing the xml submit attributes section for
        a single entry (empty string when there is nothing to render).
    """
    if not submit_attrs:
        return ""
    fragments = [
        f'\n            <submit_attr name="{name}" value="{value}"/>'
        for name, value in sorted(submit_attrs.items())
        if value is not None
    ]
    return "".join(fragments)
def invert_dict(dic):
    """Return a new dict with keys and values swapped.

    Duplicate values collapse: the last key wins. Values must be hashable.
    """
    return {value: key for key, value in dic.items()}
def remove_unspecified_unicode(value):
    """In a very small number of cases, NiH has some pre-processed badly
    formatted unspecified unicode characters. The following recipe seems to
    clean up all of the discovered cases."""
    # Non-breaking-space variants, longest first so pairs collapse to ONE space.
    for space_like in ('\xa0\xa0', '\xa0 ', '\xa0'):
        value = value.replace(space_like, ' ')
    # Soft hyphen glued onto a real hyphen.
    value = value.replace('-\xad', '-')
    # Collapse any runs of spaces left behind.
    while '  ' in value:
        value = value.replace('  ', ' ')
    return value
def find_option(block, key, type, default, loud=False, layer_name=None, layer_idx=-1):
    """
    ----------
    Author: Damon Gwinn (gwinndr)
    ----------
    - Looks up ``key`` in ``block`` and casts the found value with ``type``
    - Falls back to ``default`` when the key is absent
    - When falling back and ``loud`` is True, prints a notice; ``layer_name``
      (str) and ``layer_idx`` (int) further label the printed message
    ----------
    """
    if key in block.keys():
        val = block[key]
    else:
        val = default
        if loud:
            label = "" if layer_name is None else "%s at idx %d:" % (layer_name, layer_idx)
            print(label, "Using default:", default, "for key:", key)
    # None is passed through uncast so "no value" stays distinguishable.
    if val is not None:
        val = type(val)
    return val
def detectLoop(head):
    """Detect a cycle in a linked list (Floyd's tortoise-and-hare).

    Take two pointers: one that moves one step, the other two steps; if
    they ever meet there is a loop. (The alternative is to keep a set of
    visited nodes and return True on revisiting one, at O(n) extra space.)

    Args:
        head: head node of the list (each node exposes ``.next``), or None.

    Returns:
        bool: True if the list contains a cycle, False otherwise.

    Fix: the previous revision fell off the end and implicitly returned
    None for acyclic lists; it now explicitly returns False, matching the
    boolean contract.
    """
    slow = head
    fast = head
    while slow and fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        if slow == fast:
            return True
    return False
def git_config_bool(value):
    """Convert the given git config string value to True or False.

    Raise ValueError if the given string was not recognized as a boolean
    value. Note the empty string counts as True, per git's convention for
    a key that is present without a value.
    """
    token = str(value).strip().lower()
    if token in {"true", "1", "yes", "on", ""}:
        return True
    if token in {"false", "0", "no", "off", "none"}:
        return False
    raise ValueError("Failed to parse '%s' into a boolean value" % (value))
def lim_eps(a, eps):
    """
    Return min / max of an array a, widened by eps*(max(a) - min(a)).
    Handy for nice looking axes labeling.

    Fix: min(a) and max(a) were each computed twice (four linear scans);
    they are now computed once and reused.
    """
    lo = min(a)
    hi = max(a)
    margin = (hi - lo) * eps
    return (lo - margin, hi + margin)
def generate_job_name(
    endpoint_name: str, perturb_prefix: str, dataset_name: str, timestamp: int
):
    """Generate the job name with a given timestamp.

    The timestamp need to be properly spaced out, because they need to be
    unique across all jobs in the same AWS region. This is taken care of by
    `_set_jobname_with_unique_timestamp`
    """
    suffix = f"-{timestamp}"
    # Skip empty/None parts so we never emit doubled dashes.
    base = "-".join(part for part in (endpoint_name, perturb_prefix, dataset_name) if part)
    # AWS caps job names at 63 chars; truncate the base, never the timestamp.
    return base[: 63 - len(suffix)] + suffix
def is_manual_format_params(format_params):
    """Says if the format_params is from a manual specification
    (i.e. every entry is explicitly provided, none is None).

    See Also:
        is_automatic_format_params
    """
    assert not isinstance(
        format_params, str
    ), "format_params can't be a string (perhaps you meant is_manual_format_string?)"
    for entry in format_params:
        if entry is None:
            return False
    return True