content
stringlengths
42
6.51k
def new_list_with_replaced_item(input_list, index, new_value):
    """Return a copy of *input_list* with the element at *index* replaced by *new_value*.

    The original list is left unmodified.
    """
    replaced = list(input_list)
    replaced[index] = new_value
    return replaced
def aggregate(d):
    """Replace each leaf list d[y][h] with its arithmetic mean, in place.

    :param d: dict from squeeze (two-level dict of lists of numbers)
    :return: the same dict, aggregated
    """
    for outer_key in d:
        inner = d[outer_key]
        for inner_key in inner:
            values = inner[inner_key]
            inner[inner_key] = sum(values) * 1.0 / len(values)
    return d
def src2obsmod(src):
    """Guess whether *src* names model or observational data.

    Returns 'model' only when the substring 'model' appears; otherwise 'obs'.
    """
    return 'model' if 'model' in src else 'obs'
def get_predict_fstat_parameters_from_dict(signal_parameters, transientWindowType=None):
    """Extract the subset of parameters needed to predict F-stats.

    Pulls F0, Alpha, Delta, h0, cosi, psi out of *signal_parameters*
    (keys follow the PyFstat convention) and maps any transient parameters
    to the names `helper_functions.predict_fstat()` expects.

    Parameters
    ----------
    signal_parameters: dict
        Must contain at least the required signal parameters.
    transientWindowType: str
        If given, stored in the output dict, overwriting any existing value.

    Returns
    -------
    predict_fstat_params: dict
        The dictionary of selected parameters.
    """
    base_keys = ["F0", "Alpha", "Delta", "h0", "cosi", "psi"]
    transient_key_map = {
        "transientWindowType": "transientWindowType",
        "transient_tstart": "transientStartTime",
        "transient_duration": "transientTau",
    }
    predict_fstat_params = {key: signal_parameters[key] for key in base_keys}
    for src_key, dest_key in transient_key_map.items():
        if src_key in signal_parameters:
            predict_fstat_params[dest_key] = signal_parameters[src_key]
    if transientWindowType is not None:
        predict_fstat_params["transientWindowType"] = transientWindowType
    return predict_fstat_params
def full(left, right):
    """Return the union of the key sets of *left* and *right*."""
    return set(left) | set(right)
def generate_record_identifiers(metadata):
    """Build the record-identifiers metadata list from *metadata*.

    NOTE: pops the 'doi' key from *metadata* (mutates the input).
    https://oarepo.github.io/publications-api/schemas/publication-dataset-v1.0.0.html#allOf_i0_allOf_i1_identifiers
    """
    doi = metadata.pop('doi')
    return [{'identifier': doi, 'scheme': 'doi'}]
def px(mm, dpi):
    """Return the length in pixels of *mm* millimetres at *dpi* resolution."""
    inches = mm / 25.4
    return int(round(inches * dpi))
def dictToTuple(heading, d):
    """Return the values of *d* as a tuple, ordered by *heading*."""
    return tuple(d[attr] for attr in heading)
def _dict_repr_to_constructor_syntax(string):
    """Convert literal dict reprs like ``{'x': 1}`` to ``dict(x=1)`` constructors.

    Only safe for dicts without string values (fine for a typespec, where
    values are always Proxytypes).
    """
    replacements = (("{", "dict("), ("}", ")"), (": ", "="), ("'", ""))
    for old, new in replacements:
        string = string.replace(old, new)
    return string
def hello(who):
    """Return a greeting string for *who*."""
    return "greeting " + who
def basicStats(data):
    """Return (mean, median, std, var, max, min) of *data*.

    For arrays of length <= 1 a sentinel tuple of six -99 values is returned.
    """
    if len(data) <= 1:
        return (-99,) * 6
    import numpy as N
    return (N.mean(data), N.median(data), N.std(data),
            N.var(data), N.max(data), N.min(data))
def isqrt(n: int) -> int:
    """Return the integer square root of *n* (Newton's method on integers)."""
    candidate = n
    nxt = (candidate + 1) // 2
    while nxt < candidate:
        candidate, nxt = nxt, (nxt + n // nxt) // 2
    return candidate
def dms(d, delim=':', output_string=False):
    """Convert degrees, minutes, seconds to decimal degrees, and back.

    EXAMPLES:
      dms('150:15:32.8')
      dms([7, 49])
      dms(18.235097)
      dms(18.235097, output_string=True)

    Also works for negative values.

    SEE ALSO: :func:`hms`
    """
    # 2008-12-22 00:40 IJC: Created
    # 2009-02-16 14:07 IJC: Works with spaced or colon-ed delimiters
    # 2015-03-19 21:29 IJMC: Copied from phot.py. Added output_string.
    from numpy import sign
    if d.__class__ == str or hasattr(d, '__iter__'):
        # Sexagesimal (string or sequence) -> decimal degrees.
        if d.__class__ == str:
            d = d.split(delim)
            if len(d) == 1:
                # No delimiter hits: fall back to space-separated fields.
                d = d[0].split(' ')
        s = sign(float(d[0]))
        if s == 0:
            s = 1
        degval = float(d[0])
        if len(d) >= 2:
            degval = degval + s * float(d[1]) / 60.0
        if len(d) == 3:
            degval = degval + s * float(d[2]) / 3600.0
        return degval
    # Decimal degrees -> (deg, min, sec) tuple (or formatted string).
    sgn = -1 if d < 0 else +1
    d = abs(d)
    whole_deg = int(d)
    whole_min = int((d - whole_deg) * 60.0)
    seconds = (d - whole_deg - whole_min / 60.0) * 3600.0
    ret = (sgn * whole_deg, whole_min, seconds)
    if output_string:
        ret = '%+03i:%02i:%04.2f' % ret
    return ret
def is_beta(skill_name, skill_list):
    """Return the 'beta' flag of the named skill, or False if not found."""
    for entry in skill_list:
        if entry.get('name') == skill_name:
            return entry.get('beta', False)
    return False
def ProfitArrayToHierarchy_profitPh(oneway, prune=None):
    """Build a fromId -> toId -> best-traderow hierarchy, keyed on highest profitPh.

    Pass an existing hierarchy as *prune* to merge into it:
        fromId
            toId  commodId=traderow
            toId  commodId=traderow
    """
    if prune is None:
        prune = dict()
    for way in oneway:
        src, dst = way["AbaseId"], way["BbaseId"]
        if src == dst:
            # Anomalous self-trade data; discard.
            continue
        destinations = prune.setdefault(src, dict())
        current_best = destinations.get(dst)
        if current_best is None or current_best['profitPh'] < way['profitPh']:
            destinations[dst] = way
    return prune
def multiply_even(num):
    """Return the product of all even numbers in the range [2, num)."""
    product = 1
    for even in range(2, num, 2):
        product *= even
    return product
def check_region(data, x, y, match):
    """Return True iff the region of *data* at offset (x, y) equals *match* row-for-row."""
    width = len(match[0])
    return all(data[y + row][x:x + width] == match[row]
               for row in range(len(match)))
def to_flag(arg_str: str) -> str:
    """Return the CLI flag name (sans ``--``) for an argparse attribute name.

    :param arg_str: Name of the arg (underscored attribute/variable name)
    :return: The flag name with underscores replaced by dashes
    """
    return arg_str.replace("_", "-")
def encode_string(value):
    """Escape tab and newline characters in *value* as ``\\t`` and ``\\n``."""
    return value.replace('\t', '\\t').replace('\n', '\\n')
def bits_to_hex(bit_array, delimiter=":"):
    """Format a bit array as a delimited hex string.

    If the length is not a multiple of 8, the first byte is zero-padded
    from the left; e.g. [1,0,0,1,1,0,1,0,0,1,0] becomes '04:d2'.

    Args:
        bit_array: the bit array to convert
        delimiter: string placed between bytes
    Returns:
        the formatted hex string.
    """
    remainder = len(bit_array) % 8
    padding = "0" * (8 - remainder) if remainder else ""
    bitstring = padding + "".join(str(bit) for bit in bit_array)
    octets = [int(bitstring[pos:pos + 8], 2)
              for pos in range(0, len(bitstring), 8)]
    return delimiter.join("%02x" % octet for octet in octets)
def centroid(X):
    """Return the centroid of a 3-D vectorset *X* as an (x, y, z) tuple.

    https://en.wikipedia.org/wiki/Centroid — the mean position of all
    points in each coordinate direction: C = sum(X)/len(X).

    Parameters
    ----------
    X : iterable of (N, 3) points

    Returns
    -------
    C : tuple of three floats
    """
    sums = [0.0, 0.0, 0.0]
    count = 0
    for point in X:
        count += 1
        for axis, value in enumerate(point):
            sums[axis] += value
    return (sums[0] / count, sums[1] / count, sums[2] / count)
def get_from_subtree(subtree, key):
    """Return the value for *key* in the BST rooted at *subtree*, or None.

    :param subtree: root node of the subtree to search (nodes expose
        .key, .value, .left, .right)
    :param key: the key to search for
    :return: the associated value, or None when the key is absent
    """
    node = subtree
    while node is not None:
        if key == node.key:
            return node.value
        node = node.left if key < node.key else node.right
    return None
def write_vcf(dframe):
    """Variant Call Format (VCF) for SV loci — not implemented here.

    See export.export_vcf() for the real implementation.
    """
    return NotImplemented
def get_makefile_name(libname):
    """Return the Makefile name corresponding to library *libname*."""
    return f"Makefile.{libname}"
def top_sentences(query, sentences, idfs, n):
    """Rank sentences against *query* and return the top *n*.

    Given a `query` (set of words), `sentences` (dict mapping sentence to
    its word list) and `idfs` (dict of word IDF values), sentences are
    ranked by total matched IDF; ties are broken by query term density.
    """
    scored = []
    for sentence, sentence_words in sentences.items():
        idf_total = 0
        matched = 0
        for word in query:
            if word in sentence_words:
                matched += 1
                idf_total += idfs[word]
        density = float(matched) / len(sentence_words)
        scored.append((sentence, idf_total, density))
    scored.sort(key=lambda item: (item[1], item[2]), reverse=True)
    return [sentence for sentence, _, _ in scored[:n]]
def _clear_data(data, need_fields_tuple):
    """Return a copy of *data* restricted to *need_fields_tuple*.

    Returns False when any required field is missing from *data*.
    """
    still_needed = list(need_fields_tuple)
    cleaned = data.copy()
    for field in data:
        if field in still_needed:
            still_needed.remove(field)
        else:
            cleaned.pop(field)
    return cleaned if not still_needed else False
def transform_search_result_match(definition_id, definition_result, path, state):
    """Flatten a dataset-definition search match into a field->value dict.

    :param definition_id: associated dataset definition
    :param definition_result: search result
    :param path: associated entity path
    :param state: associated entity state
    :return: the flattened result
    """
    if state['entity_state'] == 'existing':
        entry = state['entry']
    else:
        entry = '{} ({})'.format(definition_result['entry_id'],
                                 definition_result['entry_created'])
    return {
        'definition': definition_id,
        'info': definition_result['definition_info'],
        'entity': path,
        'state': state['entity_state'],
        'entry': entry,
    }
def get_dynamic_attr_vals(obj, ignore_attrs=None, include_private=False):
    """Return a dict of an object's dynamic attribute values to encode.

    keys = attribute names, values = attribute values.

    arguments
    =========
    * obj - object to get dynamic attribute values from.
    * ignore_attrs - list or tuple of attribute names to skip. Default = None.
    * include_private - bool; if False, attributes starting with '_' are
      skipped. Default = False.
    """
    vals = {}
    if hasattr(obj, '__dict__'):
        # FIX: dict.iteritems() is Python 2 only; use items() on Python 3.
        for attr, val in obj.__dict__.items():
            if ignore_attrs is not None and attr in ignore_attrs:
                continue
            if not include_private and attr.startswith('_'):
                continue
            vals[attr] = val
    return vals
def get_interpolation_range(sidelen, n, i):
    """Find the index range for interpolation in the Robinson Projection.

    Input
      sidelen: number of items on each side of i (i included on the left)
      n: total number of items
      i: index of the largest item smaller than the value
    Output
      ileft: left index of the range (inclusive), clamped at 0
      iright: right index of the range (exclusive), clamped at n
    """
    # Both original branches reduce to a single clamp on each side.
    ileft = max(0, i - sidelen + 1)
    iright = min(n, i + sidelen + 1)
    return ileft, iright
def guess_windows(buff):
    """Heuristically decide whether *buff* is a Windows USB capture record.

    windows: bytes 0xA-0xD hold an error code; 0 means success (almost always).
    linux: those bytes hold endpoint/device/bus id, unlikely to be all 0.
    Buffers shorter than 0x24 bytes are rejected outright.
    """
    if len(buff) < 0x24:
        return False
    error_code_bytes = buff[0x0A:0x0E]
    return sum(error_code_bytes) == 0
def get_first_non_empty(inputList, num):
    """Collect up to *num* stripped, non-empty strings from *inputList*.

    :param inputList: list of possibly empty strings.
    :param num: maximum number of items to return.
    :return: list of non-empty (stripped) strings; may be empty.
    """
    collected = []
    for raw in inputList:
        stripped = raw.strip()
        if not stripped:
            continue
        collected.append(stripped)
        if len(collected) > num - 1:
            break
    return collected
def get_target(data):
    """Return the target pose stored under data['target']['pose']."""
    return data['target']['pose']
def same_sentence_check(block: str, pos1: str, pos2: str) -> bool:
    """Check if two words are in the same sentence by looking for sentence
    delimiters between their starting positions.

    :param block: Block of string text the two words are found in
    :param pos1: The index of the beginning of word1 (numeric string)
    :param pos2: The index of the beginning of word2 (numeric string)
    :return: True if word1 and word2 are not separated by one of the
        sentence delimiters ``.``, ``;``, ``?``, ``!``; False otherwise
    """
    # FIX: compare positions numerically. The original compared the raw
    # strings, so e.g. "2" < "10" was False and the slice came out empty,
    # wrongly reporting the words as being in the same sentence.
    start, end = int(pos1), int(pos2)
    if start > end:
        start, end = end, start
    interstring = block[start:end]
    sentence_enders = [".", ";", "?", "!"]
    return all(s not in interstring for s in sentence_enders)
def map_to_2D_dict(mapping_data):
    """Convert an opened tab-delimited mapping file to a 2-D dictionary.

    INPUT:  mapping_data -- iterable of tab-delimited lines (header first)
    OUTPUT: dict keyed by '#SampleID', each value a header->observation dict
    """
    rows = [line.strip().split('\t') for line in mapping_data]
    header, body = rows[0], rows[1:]
    result = {}
    for row in body:
        record = dict(zip(header, row))
        result[record['#SampleID']] = record
    return result
def rebin(L, n=2):
    """Sum every consecutive group of *n* elements of *L*.

    Dividing each sum by *n* would give the boxcar average; the trailing
    remainder (len(L) % n elements) is dropped.
    """
    full_groups = len(L) // n
    return [sum(L[start:start + n]) for start in range(0, full_groups * n, n)]
def CCW_ps(Pxx, Pyy, Pxy):
    """Counter-clockwise power spectrum from auto- and cross-spectra."""
    quad_spectrum = Pxy.imag
    return (Pxx + Pyy + 2 * quad_spectrum) / 2.0
def objtype_and_objfield_to_reftype(objtype: str, objfield: str) -> str:
    """Join an object type and field into a reftype (role name)."""
    return objtype + '.' + objfield
def parse_width(width, n):
    """Parse an int or array of widths into a list of *n* widths.

    Parameters
    ----------
    width : int or array_like
        A single width (replicated n times) or a per-item sequence.
    n : int
        Expected number of widths.
    """
    if isinstance(width, int):
        return [width] * n
    assert len(width) == n, "Widths and data do not match"
    return width
def chinese_date(cycle, year, month, leap, day):
    """Pack Chinese calendar components into the list-based date structure."""
    return [cycle, year, month, leap, day]
def list_style_position(keyword):
    """Validate the CSS ``list-style-position`` property value."""
    return keyword in {'inside', 'outside'}
def generate_new_row(forecast_date, target, target_end_date, location, type, quantile, value):
    """Return a dict representing one new row for the pandas dataframe."""
    return {
        "forecast_date": forecast_date,
        "target": target,
        "target_end_date": target_end_date,
        "location": location,
        "type": type,
        "quantile": quantile,
        "value": value,
    }
def checkout_path(workspace, repo):
    """Return the source checkout path for *repo* under *workspace*."""
    return "{}/{}.src".format(workspace, repo)
def preferred_rep(t):
    """Return the preferred representative of a slope (a, b) in QP^1.

    Normalizes the sign so that a > 0, or a == 0 with b >= 0.
    """
    a, b = t
    if a < 0 or (a == 0 and b < 0):
        return (-a, -b)
    return (a, b)
def pressure_plane_strain_R_disp(r, p, ri, ro, E, nu, dT, alpha):
    """Radial displacement of a pressurized thick cylinder, plane strain.

    Assumes plane strain (axial strain = 0). *dT* and *alpha* are accepted
    for interface parity with related routines but are not used here.
    """
    wall_term = ro**2 - ri**2
    A = ri**2 * ro**2 * -p / wall_term
    C = p * ri**2 / wall_term
    return (-A * nu - A + C * r**2 * (-2 * nu**2 - nu + 1)) / (E * r)
def rev(n):
    """Reverse the singly linked list headed at *n*; return the new head."""
    reversed_head = None
    node = n
    while node is not None:
        following = node.next
        node.next = reversed_head  # point node backwards
        reversed_head = node
        node = following
    return reversed_head
def is_azimuth_close(
    first: float, second: float, tolerance: float, halved: bool = True
):
    """Determine whether azimuths *first* and *second* are within *tolerance*.

    Takes into account the radial nature of azimuths.

    >>> is_azimuth_close(0, 179, 15)
    True

    >>> is_azimuth_close(166, 179, 15)
    True

    >>> is_azimuth_close(20, 179, 15)
    False

    :param first: First azimuth value to compare.
    :param second: Second azimuth value to compare.
    :param tolerance: Tolerance for closeness.
    :param halved: Are the azimuths axial (``halved=True``) or vectors.
    """
    diff = abs(first - second)
    period = 180 if halved else 360
    if diff > period / 2:
        diff = period - diff
    assert 0 <= diff <= period / 2
    return diff < tolerance
def circle(args):
    """Implicit circle of radius sqrt(2): x^2 + y^2 - 2."""
    x, y = args[0], args[1]
    return x**2 + y**2 - 2.0
def short_id_from_s3_key(input):
    """Transform an s3 key to the case/page short ID used by volume XML.

    32044142600386_redacted/alto/32044142600386_redacted_ALTO_00009_0.xml
        -> alto_00009_0
    32044142600386_redacted/casemets/32044142600386_redacted_CASEMETS_0001.xml
        -> casemets_0001
    """
    if input.endswith("xml") and ('CASEMETS' in input or 'ALTO' in input):
        filename = input.split('/')[-1]
        stem = filename.split('.')[0]
        return stem.split('redacted_')[1].lower()
    raise Exception("Not an ALTO or CASEMETS s3_key")
def _gen_non_repeat(func, arg=None, size=5, _set=None):
    """Generate *size* unique items by repeatedly calling *func*.

    Candidates already present in *_set* (or previously generated) are
    skipped. May loop indefinitely if *func* cannot supply enough fresh values.
    """
    excluded = _set if _set else set()
    fresh = set()
    while len(fresh) < size:
        # Unpack the argument tuple into the function call when provided.
        candidate = func(*arg) if arg else func()
        if candidate not in excluded and candidate not in fresh:
            fresh.add(candidate)
    return fresh
def calculate_duane_mean(est_time, alpha, beta):   # pylint: disable=C0103
    """Calculate Duane model cumulative and instantaneous means (e.g., MTBF).

    The Duane model used is:

    .. note:: cumulative mean = cum_mean = beta * T^alpha
    .. note:: instantaneous mean = cum_mean / (1 - alpha)

    :param float est_time: the time at which to calculate the means.
    :param float alpha: point estimate of the Duane alpha (shape) parameter.
    :param float beta: point estimate of the Duane b (scale) parameter.
    :return: (cumulative mean, instantaneous mean)
    :rtype: tuple
    """
    cum_mean = beta * est_time**alpha
    try:
        inst_mean = cum_mean / (1.0 - alpha)
    except ZeroDivisionError:
        # alpha == 1.0 -> fall back to the cumulative mean.
        inst_mean = cum_mean
    return cum_mean, inst_mean
def validate_url(content, **kwargs):
    """Return a fixed URL keyed by the required 't' kwarg.

    Always maps to the same URL; the 't' parameter must be sent in the API
    call, otherwise an empty dict is returned.

    :param content: unused
    :param kwargs: must include 't'
    :return: {kwargs['t']: fixed URL} or {}
    """
    if 't' not in kwargs:
        return {}
    return {kwargs['t']: 'http://www.ejemplo.com'}
def get_augmented_coordinate(target_coordinate, strengths):
    """Assemble a coordinate in the system used by `xx_region_polytope`.

    The last entry of *strengths* is treated as beta; the rest are padded
    with two zeros, sorted, and summarized into the interaction coordinate.
    """
    *interaction_strengths, beta = strengths
    ordered = sorted(interaction_strengths + [0, 0])
    interaction_coordinate = [sum(ordered), ordered[-1], ordered[-2], beta]
    return [*target_coordinate, *interaction_coordinate]
def mix_string(str):
    """Lowercase the whole string and replace spaces with '_'."""
    return str.lower().replace(' ', '_')
def calc_if_existing(x, y):
    """Return 1 if x <= y, else 0.

    Used to check whether a building's construction/retrofit happened within
    the embodied energy/emissions horizon: since the calculation is carried
    out over *y* (typically 60) years, anything older contributes 0.

    :param x: Number of years since construction/retrofit
    :param y: Number of years of the embodied calculation horizon (i.e., 60)
    :return: 1 if x <= y; 0 otherwise
    :rtype: int
    """
    return 1 if x <= y else 0
def _assert_list_of_mongo_keys(ls):
    """Validate *ls* as valid mongo collection keys; return them sorted.

    Accepts a list of str or a single str (wrapped into a list). Raises
    ValueError on wrong types, '$' characters, or duplicate keys.
    """
    if not isinstance(ls, (list, str)):
        raise ValueError('Type of ls must be list or str, but type({}) is {}.'.format(ls, type(ls)))
    if isinstance(ls, str):
        ls = [ls]
    for key in ls:
        if not isinstance(key, str):
            raise ValueError('Type of all elements of ls must be str, but type({}) is {}.'.format(key, type(key)))
        if '$' in key:
            raise ValueError('No dollar char ($) allowed.')
    ls = sorted(ls)
    if sorted(set(ls)) != ls:
        raise ValueError('Elements must be unique.')
    return ls
def filters_from_cli(filters):
    """Convert ``name:value`` filter strings to a dict of name -> [values].

    FIX: membership was previously tested against the *input list*
    (``key not in filters``) rather than the accumulating dict, so the value
    list was re-created on every iteration and repeated keys kept only their
    last value.

    :param filters: A list of strings of ``attribute:value`` pairs.
    :type filters: list[str]
    :rtype: dict
    """
    _filters = dict()
    for pair in filters:
        key, value = pair.split(":")
        _filters.setdefault(key, list()).append(value)
    return _filters
def availability(name):
    """Return a 0-1 score for whether <name>winter.com is available.

    Currently a stub that always scores 1; could also check <name>winter
    handle availability on various online services.
    """
    # TODO: query actual domain/handle availability.
    score = 1
    return score
def _get_deep(properties, *keys):
    """Walk nested dicts by successive *keys* and return the final value."""
    value = properties
    for key in keys:
        value = value[key]
    return value
def check_not_empty_file(filename):
    """Return True iff *filename* contains exactly the text 'NOT EMPTY'.

    (Despite the historical name, this checks for the literal sentinel
    content, not merely a non-zero file size.)
    """
    with open(filename) as handle:
        return handle.read() == "NOT EMPTY"
def response_is_valid(xml_dict):
    """Check a parsed item response dict for errors.

    Returns True when the response carries no errors; otherwise returns a
    dict of the form ``{"ERROR": message}``.
    """
    lookup_error = xml_dict.get('ItemLookupErrorResponse', None)
    if lookup_error is not None:
        return {"ERROR": lookup_error.get('Error').get('Message')}
    item_response = xml_dict.get("ItemSearchResponse", None)
    if item_response is None:
        item_response = xml_dict.get("ItemLookupResponse", None)
    request = item_response.get("Items").get("Request")
    errors = request.get("Errors")
    if errors is None:
        return True
    return {"ERROR": errors.get("Error").get("Message")}
def unbalance(A, B, C, all=False):
    """Voltage/Current Unbalance Function.

    Computes the unbalance of three phase magnitudes as the maximum
    deviation from the average, expressed as a decimal fraction of the
    average (i.e. 80% = 0.8).

    Parameters
    ----------
    A, B, C: float
        Phase values (magnitudes are taken internally).
    all: bool, optional
        When True, return the per-phase unbalances as a tuple instead.

    Returns
    -------
    unbalance: float or tuple
        Maximum unbalance, or (dA, dB, dC)/avg when ``all`` is True.
    """
    magnitudes = (abs(A), abs(B), abs(C))
    avg = sum(magnitudes) / 3
    deviations = tuple(abs(m - avg) for m in magnitudes)
    if all:
        return tuple(dev / avg for dev in deviations)
    return max(deviations) / avg
def process_aws_metrics(datapoints):
    """Return the mean of the 'Average' field across *datapoints*.

    Datapoints are a list of dictionaries with exactly the same keys.
    """
    total = sum(point["Average"] for point in datapoints)
    return total / len(datapoints)
def _pat_mergeable(p1, p2):
    """Compare two *AbstractionPattern* instances for merge-equality.

    Evidence and hypothesis comparison is assumed positive, so only the
    automata identity and the initial/final states are compared. Two Nones
    are considered equal.
    """
    if p1 is None or p2 is None:
        return p1 is p2
    return (p1.automata is p2.automata
            and p1.istate == p2.istate
            and p1.fstate == p2.fstate)
def dbscan_param_space_search(max_noise, max_eps_tries, number_of_elements, klist, kdist_matrix):
    """Search suitable (minpts, eps) parameters for DBSCAN.

    Generates a grid of minpts-eps values based on the noise limit imposed
    by the user, then adds values based on (Zhou et al. 2012).

    FIX: the stride was computed with true division, which yields a float in
    Python 3 and makes ``range()`` raise TypeError; use floor division.
    """
    # MIN_NOISE = 5%
    index_for_min_noise = max(0, int(number_of_elements - 0.05 * number_of_elements))
    index_for_max_noise = int(number_of_elements - (min(max_noise, 100) * 0.01 * number_of_elements) - 1)
    noise_stride = max(1, (index_for_min_noise - index_for_max_noise) // max_eps_tries)
    params = []
    for i in range(index_for_max_noise, index_for_min_noise, noise_stride):
        for j, k in enumerate(klist):
            params.append((k, kdist_matrix[j][i]))
    del kdist_matrix
    return params
def list_wrap_remove(var):
    """Unwrap a single item that might be wrapped in a one-element list.

    Lists of other lengths (including empty) and non-lists pass through
    unchanged.
    """
    if type(var) is list and len(var) == 1:
        return var[0]
    return var
def weight_name_to_layer_name(weight_name):
    """Derive the layer name from a weight tensor name.

    Handles the modern '<layer>_weights' / '<layer>_bias' convention (with a
    special case when the third-from-last token is 'input') and several
    legacy suffixes ('_im', '_pose', single trailing character).
    """
    tokens = weight_name.split('_')
    suffix = tokens[-1]
    # Modern naming convention.
    if suffix in ('weights', 'bias'):
        if len(tokens) >= 3 and tokens[-3] == 'input':
            return weight_name[:weight_name.rfind('input') - 1]
        return weight_name[:weight_name.rfind(suffix) - 1]
    # Legacy naming conventions.
    if suffix == 'im':
        return weight_name[:-4]
    if suffix == 'pose':
        return weight_name[:-6]
    return weight_name[:-1]
def parameter_unscale(chain, a, pscale, chis=None, wchis=None, baseline=None):
    """Undo the "a"-vector scaling performed in PARAMETER_SCALE.

    Also adjusts the derived quantities: chi-squared and weighted
    chi-squared scale by pscale[0]**2, the baseline by pscale[0].

    =========
    Arguments
    =========
    **a:**        [list][float] optimization parameter vector
    **pscale:**   [list][float] un-scaling coefficients for amplitudes
    **chis:**     [float] chi-squared value of least-squares fit
    **wchis:**    [float] weighted chi-squared value of least-squares fit
    **baseline:** [list][float] baseline fit array

    Returns (a, chis, wchis, baseline), each unscaled (or None if not given).
    """
    a = a * pscale
    amplitude_scale = pscale[0]
    if baseline is not None:
        baseline = baseline * amplitude_scale
    if chis is not None:
        chis = chis * amplitude_scale**2
    if wchis is not None:
        wchis = wchis * amplitude_scale**2
    return a, chis, wchis, baseline
def merge(bboxes, direction):
    """Merge netport boxes along one axis.

    Args:
        bboxes (list): [tuple(x1, y1, x2, y2, score)]
        direction (int): 0 = horizontal run, otherwise vertical

    Returns:
        list: (x1, y1, x2, y2, score) — the long axis spans the first to
        last box, the cross axis comes from the highest-scoring box, and
        the score is the average of all scores.
    """
    count = len(bboxes)
    assert count > 0
    mean_score = sum(box[4] for box in bboxes) / count
    # max() returns the first maximal box, matching the original
    # strictly-greater update rule.
    best = max(bboxes, key=lambda box: box[4])
    if direction == 0:
        x1, x2 = bboxes[0][0], bboxes[-1][2]
        y1, y2 = best[1], best[3]
    else:
        y1, y2 = bboxes[0][1], bboxes[-1][3]
        x1, x2 = best[0], best[2]
    return [x1, y1, x2, y2, mean_score]
def checkout_time(customers, cash_registers):
    """Return the total time for all *customers* to check out.

    Each new customer joins the register with the smallest accumulated time.
    """
    if len(customers) < cash_registers:
        return max(customers)
    registers = list(customers[:cash_registers])
    for duration in customers[cash_registers:]:
        shortest = registers.index(min(registers))
        registers[shortest] += duration
    return max(registers)
def is_image_file(s):
    """Return True iff *s* is an image reference (starts with 'img:')."""
    return s.startswith('img:')
def intervals(lst, interval_pct=10):
    """Divide the sorted list into percentage intervals; return the marks.

    Useful for giving progress updates when cycling through games.

    :param lst: list to divide
    :param interval_pct: int, pct each interval represents (10 = every 10%).
    :return: list of (index, value) tuples marking each interval start
    """
    ordered = sorted(lst)
    marks = []
    step = 0
    while True:
        fraction = interval_pct / 100 * step
        position = round(len(ordered) * fraction)
        if position >= len(ordered):
            break
        marks.append((position, ordered[position]))
        step += 1
    return marks
def _row_tuple(row, _):
    """Return row[0] for 1-tuples; otherwise return the whole tuple."""
    if len(row) == 1:
        return row[0]
    return row
def safe_str(inp):
    """Decode bytes to str; pass everything else through unchanged.

    The Python 3 python-ldap library returns information as bytes; this
    ensures we convert to str before use.
    """
    return inp.decode("utf-8") if isinstance(inp, bytes) else inp
def index_name(args) -> str:
    """Return *THE* index name used for this code section — always 'main'.

    A per-codified-book index experiment was tried, but the search results
    could not be merged properly, so everything is indexed into one common
    index called 'main'.  /tjd/ 2020-04-06

    Args:
        args (argparse): Argparse arguments (sometimes passed as a str);
            currently unused.

    Returns:
        (str): the index name
    """
    return 'main'
def calculate_percent_overlap(x_sep: float) -> int:
    """Return the percentage overlap between X images for separation *x_sep*.

    Full overlap (100%) at zero separation, scaling down linearly over the
    12800-unit frame width.
    """
    return int(100 - 100 * (x_sep / 12800))
def max_index(alignment_matrix):
    """Return (row, col) of the maximum entry of the alignment matrix.

    Ties resolve to the first occurrence in row-major order.
    """
    best_i, best_j = 0, 0
    for row_idx, row in enumerate(alignment_matrix):
        for col_idx in range(len(alignment_matrix[0])):
            if row[col_idx] > alignment_matrix[best_i][best_j]:
                best_i, best_j = row_idx, col_idx
    return best_i, best_j
def levenshtein(s1, s2, D=2):
    """Return True iff the edit distance between *s1* and *s2* is <= *D*.

    Works by advancing a frontier of (chars-of-s2-consumed, errors-left)
    states for each character of s1, then checking whether any state can
    absorb the remainder of s2 within its error budget.
    """
    def successors(c, consumed, budget):
        # consumed: number of s2 characters matched so far.
        # budget: number of errors still allowed.
        if budget >= 1:
            yield consumed, budget - 1        # deletion of c
            yield consumed + 1, budget - 1    # substitution
        for skipped in range(min(budget + 1, len(s2) - consumed)):
            if c == s2[consumed + skipped]:
                # skipped insertions followed by a character match
                yield consumed + skipped + 1, budget - skipped

    frontier = {(0, D)}
    for c in s1:
        advanced = set()
        for consumed, budget in frontier:
            advanced.update(successors(c, consumed, budget))
        frontier = advanced
    return any(len(s2) - consumed <= budget for consumed, budget in frontier)
def nonempty(text):
    """Return *text*; falls back to b"(none)" when it is empty/falsy."""
    if text:
        return text
    return b"(none)"
def recommended_oil_time_constant(cooling_mode):
    """Return the recommended oil constant per IEEE C57.91-2011 Table 4.

    Note: matching is by substring containment of *cooling_mode* inside
    the candidate mode strings (preserved from the original behavior).
    """
    def mode_matches(candidates):
        return any(cooling_mode in candidate for candidate in candidates)

    if mode_matches(('ONAN', 'ON')):
        return 0.8
    if mode_matches(('ONAF', 'OB', 'OFAN', 'OF', 'OFB')):
        return 0.9
    return 1.0
def remove_separators(nhs_number):
    """Strip separators from an NHS number, e.g. 123-456-7890 -> 1234567890.

    Numbers without separators (digits at positions 3 and 7) pass through
    unchanged.
    """
    if nhs_number[3].isnumeric() or nhs_number[7].isnumeric():
        return nhs_number
    return nhs_number[0:3] + nhs_number[4:7] + nhs_number[8:]
def split(l: list, n_of_partitions: int) -> list:
    """Split *l* into *n_of_partitions* contiguous, near-equal partitions.

    The first ``len(l) % n_of_partitions`` partitions receive one extra
    element each.

    :param l: The list to be split.
    :param n_of_partitions: Number of partitions.
    :return: A list of partitions, each itself a list.
    """
    base, extra = divmod(len(l), n_of_partitions)
    partitions = []
    start = 0
    for index in range(n_of_partitions):
        length = base + (1 if index < extra else 0)
        partitions.append(l[start:start + length])
        start += length
    return partitions
def some_method(a1, a2, a3):
    """Return a one-element list: the smaller of a1/a2 when a3 is truthy,
    otherwise the larger.

    :param a1: First item to compare
    :param a2: Second item to compare
    :param a3: Should reverse (pick the smaller)
    :return: [a1] or [a2]
    """
    low, high = (a2, a1) if a1 > a2 else (a1, a2)
    return [low if a3 else high]
def pathnode(path):
    """Split a complete path into its (group, leaf) components.

    Trailing slashes are ignored.
    """
    trimmed = path.rstrip('/')
    parts = trimmed.split('/')
    leaf = parts.pop()
    return '/'.join(parts), leaf
def vector_sum(lhs, rhs):
    """Return the component-wise sum of two XY vectors.

    :param lhs: tuple/list with X and Y at indices 0 and 1
    :param rhs: tuple/list with X and Y at indices 0 and 1
    :return: [x_sum, y_sum]
    """
    x_sum = lhs[0] + rhs[0]
    y_sum = lhs[1] + rhs[1]
    return [x_sum, y_sum]
def parse_interval_string(time_interval: str) -> int:
    """Return total minutes for an interval string like '1d2h30m'.

    Args:
        time_interval (str): interval such as '2h', '45m', '1d2h30m'

    Returns:
        int: total minutes in the interval
    """
    remainder = f"{time_interval}!"  # append stop char as a sentinel
    total_minutes = 0
    for unit, minutes_per_unit in (("d", 60 * 24), ("h", 60), ("m", 1)):
        if unit in remainder:
            amount, remainder = remainder.split(unit)
            total_minutes += int(amount) * minutes_per_unit
    assert remainder == "!"
    return total_minutes
def make_quality_list(formats):
    """Return a comma-separated list of available formats, ending with 'best'.

    :param formats: iterable of format name strings
    :return: e.g. '720p, 1080p, best'
    """
    # Join instead of quadratic '+=' concatenation; the original also
    # carried an unused local ('quarity'), removed here.
    return ", ".join(list(formats) + ["best"])
def process_ctags_output(find, process_list):
    """Find indexes of line numbers that directly follow a ``find`` keyword.

    Scans the flattened ctags token list and returns every index ``i`` such
    that ``process_list[i - 1] == find`` and ``process_list[i]`` is a digit
    string (i.e. a line number following a member/function/class/method tag).

    :param find: keyword of method type (member/function/class/method)
    :param process_list: ctags output as a flat list of token strings
    :return: list of indexes of the matching line-number tokens

    Bug fixed: the original started at index 0, so ``process_list[-1]``
    wrapped around to the LAST element, falsely matching when the list
    ended with ``find`` and began with a digit string.
    """
    return [i for i in range(1, len(process_list))
            if process_list[i - 1] == find and process_list[i].isdigit()]
def uniform_cdf(x: float) -> float:
    """Return P(X <= x) for X uniformly distributed on [0, 1].

    :param x: Point at which to evaluate the CDF.
    :return: 0 below the support, x inside it, 1 above it.
    """
    if x < 0:
        return 0  # uniform random variable is never below 0
    # inside [0, 1) the CDF is the identity, e.g. P(X <= 0.4) = 0.4;
    # at or above 1 the probability is certain
    return x if x < 1 else 1
def number_of_cents(change):
    """ (float) -> int

    Return the cents portion of a dollar amount, i.e. the fractional part
    scaled to an integer number of cents (rounded to absorb float error).

    >>> number_of_cents(1.25)
    25
    >>> number_of_cents(20.00)
    0
    """
    fractional_dollars = change % 1
    return round(fractional_dollars * 100)
def floats_to_ints(list_dict):
    """Truncate float values to ints, one level deep, in a list of dicts.

    Each dict's float values are replaced with ``int(value)``; list values
    are scanned one level and their float elements converted in place.
    Deeper nesting is left untouched. Mutates and returns ``list_dict``.
    """
    for record in list_dict:
        for key, value in record.items():
            if isinstance(value, float):
                record[key] = int(value)
            elif isinstance(value, list):
                # convert in place so aliases of the inner list see the change
                for pos, element in enumerate(value):
                    if isinstance(element, float):
                        value[pos] = int(element)
    return list_dict
def nd_denormalize(x, mu, sigma):
    """Restore ``x`` to its original mean (mu) and deviation (sigma).

    This undoes nd_normalize by rescaling then shifting.

    :param x: Normalized value(s).
    :param mu: The mean of the (original) distribution of x vectors.
    :param sigma: The standard deviation of the (original) distribution of x vectors.
    :return: The denormalized value(s).
    """
    rescaled = x * sigma
    return rescaled + mu
def broadcast(item, *funcs):
    """Deliver the same item to several functions and collect the results.

        /--> item --> double(item) -----> \\
       /                                   \\
    item -----> item --> triple(item) -----> results
       \\                                   /
        \\--> item --> quadruple(item) --> /

    Example::

        double = lambda word: word * 2
        triple = lambda word: word * 3
        quadruple = lambda word: word * 4

        results = broadcast('bar', double, triple, quadruple)
        results == ['barbar', 'barbarbar', 'barbarbarbar']

    :param item: The value passed to every function.
    :param funcs: The functions to apply.
    :return: List of each function's result, in the order given.
    """
    return [fn(item) for fn in funcs]
def get_class_name_from_config_file(path):
    """Extract the class name from the given path.

    The class name is the first path component after the ``/csv/`` segment,
    e.g. ``".../csv/<class_name>/file.csv"`` yields ``<class_name>``.

    :param path: Path string containing a ``/csv/`` segment.
    :return: The class-name component.
    """
    after_csv = path.split('/csv/')[1]
    return after_csv.split('/')[0]
def state_reward(state, action):
    """Gridworld transition: apply ``action`` to ``state`` on a 5x5 grid.

    Special squares A=(0, 1) and B=(0, 3) teleport to A'=(4, 1) and
    B'=(2, 3) with rewards 10 and 5 regardless of the action. Stepping
    off the grid leaves the state unchanged with reward -1; any other
    move succeeds with reward 0.

    :param state: previous state
    :type state: tuple
    :param action: action
    :type action: tuple
    :return: new state and reward
    :rtype: tuple
    """
    x, y = state
    # A -> A' and B -> B' jumps override the action entirely
    jumps = {(0, 1): ((4, 1), 10), (0, 3): ((2, 3), 5)}
    if (x, y) in jumps:
        return jumps[(x, y)]
    nx, ny = x + action[0], y + action[1]
    # single-step move off either edge: stay put, penalize
    if nx in (-1, 5) or ny in (-1, 5):
        return (x, y), -1
    return (nx, ny), 0
def complete_one_tree(span_idxes, doc, range_idxes=None, full_tree=True):
    """Compute the complete subtree for a given span

    Args:
        span_idxes ([int, int]): the indexes of the given span to be extended
        doc (Doc): The spacy doc
        range_idxes ([int, int]], optional): the maximum allowed index range.
            Defaults to None (whole doc).
        full_tree (bool, optional): if use the entire tree. Defaults to True.

    Returns:
        [int, int]: the extended tree as [start, end) token indexes
    """
    span_start, span_end = span_idxes
    # empty span: nothing to extend
    if span_end - span_start == 0:
        return [span_start, span_end]
    # all-punctuation spans collapse to an empty span at span_start
    if all([doc[i].is_punct for i in range(span_start, span_end)]):
        return [span_start, span_start]
    span = doc[span_start: span_end]
    if full_tree:
        # start from the widest permitted window, then narrow it around the
        # heads of the span's topmost tokens
        left_edge, right_edge = range_idxes if range_idxes else [0, len(doc)]
        for token in span:
            # skip tokens that dominate another span token: only the span's
            # topmost (non-dominated) tokens contribute their head's subtree
            if any([t.is_ancestor(token) for t in span]):
                continue
            head = token.head
            # NOTE(review): max on the left and min on the right SHRINK the
            # window toward the head's subtree edges — presumably intentional
            # to intersect with range_idxes; confirm against callers
            left_edge = max(left_edge, head.left_edge.i)
            right_edge = min(right_edge, head.right_edge.i)
    else:
        # shallow variant: collect only each token and its immediate head,
        # not the full subtree (see the commented-out alternative)
        all_tokens = set()
        for token in span:
            head = token.head
            subtrees = set([head, token]) #(set(head.subtree) - set(token.subtree)) | set([token])
            all_tokens |= subtrees
            #left_edge = max(left_edge, curr_left_edge)
            #right_edge = min(right_edge, curr_right_edge)
        # widen to cover every collected token, but never shrink inside the span
        left_edge = min([a.i for a in all_tokens] + [span_start])
        right_edge = max([a.i for a in all_tokens] + [span_end-1])
    # trim punctuation tokens that were pulled in outside the original span;
    # NOTE(review): right-side guard compares against span_end (exclusive)
    # rather than span_end - 1 — looks asymmetric with the left side; verify
    while left_edge < span_start and doc[left_edge].is_punct:
        left_edge += 1
    while right_edge > span_end and doc[right_edge].is_punct:
        right_edge -= 1
    left_idx, right_idx = left_edge, right_edge
    if left_idx <= right_idx: # or root == span.root.head:
        # right_idx is inclusive here; convert back to an exclusive end
        return [left_idx, right_idx+1]
    else:
        # window collapsed (edges crossed): fall back to the original span
        return [span_start, span_end]
def is_tool(name):
    """ Check whether `name` is on PATH and marked as executable.

    Returns
    -------
    True if `name` exists
    """
    from shutil import which

    found = which(name)
    return found is not None
def solve(lines):
    """Solve the problem.

    Moves a ship on a grid: F advances along the current heading, N/E/S/W
    translate directly, and R/L rotate the heading by 90-degree steps.
    Unknown commands are ignored; turns other than 90/180/270 raise.

    :param lines: iterable of instruction strings, e.g. "F10", "R90".
    :return: Manhattan distance from the origin after all instructions.
    :raises ValueError: on an R/L turn that is not 90, 180 or 270 degrees.
    """
    x = y = 0
    hx, hy = 1, 0  # heading: unit vector, initially east
    translations = {"N": (0, 1), "S": (0, -1), "E": (1, 0), "W": (-1, 0)}
    for instruction in lines:
        cmd, amount = instruction[0], int(instruction[1:])
        if cmd == "F":
            x += amount * hx
            y += amount * hy
        elif cmd in translations:
            tx, ty = translations[cmd]
            x += amount * tx
            y += amount * ty
        elif cmd in ("R", "L"):
            if amount not in (90, 180, 270):
                raise ValueError(f"cannot turn {cmd}{amount}")
            # express every turn as repeated 90-degree clockwise rotations
            quarter_turns = amount // 90 if cmd == "R" else (360 - amount) // 90
            for _ in range(quarter_turns):
                hx, hy = hy, -hx
    return abs(x) + abs(y)
def endOfChunk(prevTag, tag, prevType, type):
    """ checks if a chunk ended between the previous and current word;
    arguments: previous and current chunk tags, previous and current types

    Returns True when the tag transition (or a type change on a non-O,
    non-. previous tag) closes the chunk that was open at the previous
    word. The duplicated ``I -> O`` clause of the original has been
    removed; behavior is unchanged.
    """
    # corrected 1998-12-22: these chunks are assumed to have length 1
    if prevTag == "[" or prevTag == "]":
        return True
    return ((prevTag == "B" and tag in ("B", "O")) or
            (prevTag == "I" and tag in ("B", "O")) or
            (prevTag == "E" and tag in ("E", "I", "O")) or
            (prevTag not in ("O", ".") and prevType != type))
def force_int(s):
    """Forcibly convert to int

    :param s: any python object
    :return: int or None

    Also catches TypeError: the original only caught ValueError, so
    non-convertible types such as None or a list raised instead of
    returning None as the docstring promises.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        return None
def getMissingValue(var):
    """ Returns the missing value or defaults to -1.e20

    Looks up the first of ``_FillValue``, ``missing_value``,
    ``_fill_value`` present in ``var.encoding``; when ``var`` has no
    ``encoding`` attribute, no key matches, or the stored value is None,
    the default -1.e20 is returned instead.
    """
    fill = None
    if hasattr(var, "encoding"):
        for candidate in ("_FillValue", "missing_value", "_fill_value"):
            if candidate in var.encoding:
                fill = var.encoding[candidate]
                break
    if fill is None:
        fill = -1.e20
    return fill