content
stringlengths
42
6.51k
def add_tag(tag, filename):
    """Insert a tag before the first extension: filename.ext -> filename.tag.ext.

    If the filename contains no ".", the tag is appended as a new extension.
    Note the split happens at the FIRST dot, so "a.tar.gz" -> "a.tag.tar.gz".

    :param tag: tag to be added
    :param filename: original filename
    """
    base, sep, rest = filename.partition(".")
    if not sep:
        return filename + "." + tag
    return base + "." + tag + sep + rest
def mask_by(var, maskvar, lo=None, hi=None):
    """Mask ``var`` to be missing except where lo <= maskvar <= hi.

    The missing-data mask becomes True where maskvar < lo, maskvar > hi, or
    where it was already True. ``None`` for lo/hi omits that bound. ``var``
    is modified in place and returned; var and maskvar are expected to share
    dimensions, and lo/hi are scalars.
    """
    if lo is None and hi is None:
        return var
    if lo is None:
        out_of_range = maskvar > hi
    elif hi is None:
        out_of_range = maskvar < lo
    else:
        out_of_range = (maskvar < lo) | (maskvar > hi)
    # Preserve the original's special-casing of a literal False mask.
    if var.mask is False:
        var.mask = out_of_range
    else:
        var.mask = var.mask | out_of_range
    return var
def split_tokenize(text):
    """Lowercase the text, pad punctuation with spaces (with a fix-up pass
    for '. . .'), and split on whitespace."""
    padded = text.lower()
    for punct in ('.', ',', ';', ':', '!', '?'):
        padded = padded.replace(punct, ' ' + punct + ' ')
        if punct == '.':
            # Same fix-up the original applied right after padding dots.
            padded = padded.replace('. . .', '...')
    return padded.split()
def ortho(subj_coord, obj_coord, subj_dim, obj_dim):
    """Combine two 1-D extents along one axis.

    Returns a tuple of 3 values: the dim of the combined array, the
    component of subj_origin in it, and the component of obj_origin in it.
    """
    if subj_coord > obj_coord:
        shift = subj_coord - obj_coord
        return (subj_coord + obj_dim - obj_coord, 0, shift)
    if subj_coord < obj_coord:
        shift = obj_coord - subj_coord
        return (obj_coord + subj_dim - subj_coord, shift, 0)
    # Equal coordinates: the larger extent already has room for the other,
    # so no origin shift and no dim increase beyond the bigger one.
    return (max(subj_dim, obj_dim), 0, 0)
def as_cloud_front_headers(headers):
    """Convert plain request/response headers to CloudFront's format.

    Args:
        headers (Dict[str, str]): headers in plain dictionary format.

    Returns:
        dict: lowercased header name -> [{"key": original name, "value": value}].
    """
    return {
        name.lower(): [{"key": name, "value": value}]
        for name, value in headers.items()
    }
def _ispath(string): """Check if a string is a directory.""" if string.startswith('$') or string.startswith('/'): return True else: return False
def get_patterns_per_repository(repo):
    """Count unique patterns in the repository, adding the multiplicity of
    the special 'DYNAMIC-PATTERN' entry when present.

    Keyword arguments:
    repo -- object containing properties of the repo
    """
    patterns = repo['uniquePatterns']
    return len(patterns) + patterns.get('DYNAMIC-PATTERN', 0)
def get_value(obj, key, default=None):
    """
    Mimic JavaScript Object/Array behavior by allowing access to nonexistent
    keys and indexes: any missing lookup yields ``default`` instead of raising.
    """
    if isinstance(obj, dict):
        return obj.get(key, default)
    if isinstance(obj, list):
        try:
            return obj[key]
        # TypeError covers non-integer keys on a list (JS would give
        # undefined); the original let that propagate.
        except (IndexError, TypeError):
            return default
    # Any other object type: return the default explicitly (the original
    # fell off the end and always returned None, ignoring `default`).
    return default
def sorted_edges(element, argyris=True):
    """Return the edges of an element as sorted (start, end) tuples.

    Required Arguments
    ------------------
    * element : array-like container of the nodes of a finite element,
      assumed to be in GMSH or Argyris order.

    Optional Arguments
    ------------------
    * argyris : True (default) to treat the input as an Argyris element;
      anything else raises NotImplementedError.
    """
    if not argyris:
        raise NotImplementedError
    corner_pairs = ((0, 1), (0, 2), (1, 2))
    return [tuple(sorted((element[a], element[b]))) for a, b in corner_pairs]
def mergeIf(dict1, dict2):
    """Return a merged copy of dict1 and dict2; on key conflicts dict1 wins."""
    merged = dict(dict2)
    merged.update(dict1)
    return merged
def get_seq_next_residue(seq, idx, size):
    """Return the single residue immediately after the chunk seq[idx:idx+size]
    (empty sequence if the chunk already reaches the end)."""
    after = idx + size
    return seq[after:after + 1]
def countFrequency(fname):
    """Count the frequency of each byte/element.

    :param fname: either a list of items (stdin/stdout path) or a filename
        whose bytes are counted.
    :return: list of (item, count) pairs sorted by ascending count; for the
        file path, items are length-1 ``bytes`` objects.
    """
    from collections import Counter
    if isinstance(fname, list):  # work for stdin/stdout part
        counts = Counter(fname)
    else:
        counts = Counter()
        with open(fname, 'rb') as f:
            # Read in chunks instead of one byte at a time; keep length-1
            # bytes objects as keys for compatibility with the old loop.
            while True:
                chunk = f.read(65536)
                if not chunk:
                    break
                counts.update(bytes([b]) for b in chunk)
    # sorted() is stable, so ties keep first-seen order like the old dict.
    return sorted(counts.items(), key=lambda kv: kv[1])
def ERR_FILEERROR(sender, receipient, message):
    """Format IRC error code 424.

    Note: ``receipient`` is unused; the parameter is kept for signature
    compatibility with the other error formatters.
    """
    return f"ERROR from <{sender}>: {message}"
def compare_list_of_committees(list1, list2):
    """Check whether two lists of committees are equal ignoring order and
    multiplicities: every committee in list1 must appear in list2 and vice
    versa. Committees are sets of positive integers.

    Parameters
    ----------
    list1, list2 : iterable of sets
    """
    for committee in list1 + list2:
        assert isinstance(committee, set)
    forward = all(committee in list2 for committee in list1)
    backward = all(committee in list1 for committee in list2)
    return forward and backward
def check_new(database: list, url_dict: dict) -> list:
    """Filter url_dict down to URLs whose (integer) post id is not yet in
    the database.

    @param database: list of all craigslist post ids currently in the text database
    @param url_dict: dict of post-id -> URL, generated by extract_links()
    @returns: list of only new URLs
    """
    return [url for post_id, url in url_dict.items() if int(post_id) not in database]
def beautifyDescription(description):
    """Convert a function docstring into a one-line test description by
    stripping each line and joining the non-empty ones with spaces."""
    parts = []
    for line in description.split('\n'):
        stripped = line.strip()
        if stripped:
            parts.append(stripped)
    return " ".join(parts)
def apply_eqn(eqn, x):
    """Evaluate the line y = m*x + b, where eqn = (m, b) as returned by
    get_eqn()."""
    slope = eqn[0]
    intercept = eqn[1]
    return slope * x + intercept
def link(item: dict):
    """Render a Markdown link from item = {"content": str, "link": {"url": str}}."""
    text = item['content']
    url = item['link']['url']
    return f"[{text}]({url})"
def dict_filter(
    kwargs,
    filters,
    defaults=None,
    copy=False,
    short=False,
    keep=False,
    **kwadd,
):
    """Filter out kwargs (typically extra calling keywords)

    Parameters
    ----------
    kwargs:
        Dictionary to filter.
    filters:
        Single or list of prefixes.
    defaults:
        Dictionary of default values for the output dictionary.
    copy:
        Simply copy items, do not remove them from kwargs.
    short:
        Allow prefixes to not end with ``"_"``.
    keep:
        Keep prefix filter in output keys.

    Example
    -------
    .. ipython:: python

        @suppress
        from xoa.misc import dict_filter
        kwargs = {'basemap':'f', 'basemap_fillcontinents':True, 'quiet':False,'basemap_plot':False}
        dict_filter(kwargs,'basemap', defaults=dict(drawcoastlines=True,plot=True), good=True)
        kwargs

    Return
    ------
    dict
    """
    if isinstance(filters, str):
        filters = [filters]
    # copy=True reads without consuming; otherwise matched items are popped
    # out of kwargs as they are transferred to the result.
    if copy:
        kwread = kwargs.get
    else:
        kwread = kwargs.pop

    # Set initial items
    kwout = {}
    for filter_ in filters:
        # An exact, underscore-free key match seeds the output: a dict value
        # is merged in wholesale, any other value is stored under the bare
        # filter name.
        if not filter_.endswith("_") and filter_ in kwargs:
            if isinstance(kwargs[filter_], dict):
                kwout.update(kwread(filter_))
            else:
                kwout[filter_] = kwread(filter_)
        # Unless short=True, the prefix used for matching must end with "_".
        if not short and not filter_.endswith("_"):
            filter_ += "_"
        # list() snapshot: kwread may pop from kwargs during iteration.
        for att, val in list(kwargs.items()):
            if att.startswith(filter_) and att != filter_:
                if keep:
                    kwout[att] = kwread(att)
                else:
                    # Strip the prefix from the output key.
                    kwout[att[len(filter_):]] = kwread(att)

    # Add some items
    kwout.update(kwadd)

    # Set some default values (never overriding filtered-in values)
    if defaults is not None:
        for att, val in defaults.items():
            kwout.setdefault(att, val)

    return kwout
def parse_schedule_with_breaks(schedule_data):
    """Parse schedule_data into a list of (delay, bus_number) tuples.

    The delay is the offset (position in the comma-separated list) before
    the first bus departs; the bus number also represents the time between
    its departures. Non-numeric entries (breaks) are skipped but still
    advance the delay counter.
    """
    schedule = []
    for delay, entry in enumerate(schedule_data.split(',')):
        if entry.isdigit():
            schedule.append((delay, int(entry)))
    return schedule
def predict_next_element(lis, base_case, reduce_function, inference_function):
    """Predict the next element of ``lis`` by repeated reduction.

    :param lis: the list to predict the next element of
    :param base_case: called with the current list each iteration; returning
        None continues the recursion, any other value terminates it and is
        treated as the base prediction
    :param reduce_function: maps the current list to a (smaller) list that is
        processed next
    :param inference_function: given a list and the predicted next element of
        its reduction, returns the predicted next element of that list
    :return: the predicted next element of lis, or None for an empty list
    """
    if not lis:
        return None
    generations = [lis]
    while True:
        stop_value = base_case(generations[-1])
        if stop_value is not None:
            generations.append([stop_value])
            break
        generations.append(reduce_function(generations[-1]))
    # The final generation holds a single element, assumed constant;
    # propagate the prediction back up through every earlier generation.
    prediction = generations[-1][-1]
    for generation in reversed(generations[:-1]):
        prediction = inference_function(generation, prediction)
    return prediction
def remove_duplicates_with_order(lst):
    """Remove duplicates from a list or tuple while preserving order.

    Keeps the first occurrence of each item. Works with unhashable items
    (membership test against the kept prefix, O(n^2)). Inputs that are
    neither list nor tuple are returned unchanged.
    """
    def _dedup(items):
        kept = []
        for item in items:
            if item not in kept:
                kept.append(item)
        return kept

    if isinstance(lst, list):
        return _dedup(lst)
    if isinstance(lst, tuple):
        return tuple(_dedup(lst))
    return lst
def instantiate_schema(values, rule):
    """Substitute ``values`` into ``rule`` (plain string replacement) and
    evaluate the resulting literal.

    SECURITY: this calls eval() on the substituted string and is unsafe on
    untrusted input -- "For security the ast.literal_eval() method should
    be used."
    """
    expression = rule
    for key in values.keys():
        expression = expression.replace(key, values[key].__str__())
    #return ast.literal_eval(expression)
    return eval(expression)
def note_favorite(note):
    """Return the note's 'favorite' status.

    Returns the stored value when the key exists, False otherwise.
    """
    # dict.get with a default replaces the membership-test-then-index idiom.
    return note.get('favorite', False)
def decode_MAC(MAC_address):
    """Return an int for a MAC address.

    @param MAC_address : like "00:12:34:56:78:9a"
    @type  MAC_address : str of colon-separated hexadecimal ints
    @return: int
    @raise RuntimeError: if the address does not have exactly 6 parts
    """
    parts = MAC_address.split(':')
    if len(parts) != 6:
        raise RuntimeError("Invalid MAC address: %s" % MAC_address)
    # Horner's scheme: equivalent to summing each byte shifted into place.
    value = 0
    for part in parts:
        value = (value << 8) + int(part, 16)
    return value
def _llvm_get_formatted_target_list(repository_ctx, targets): """Returns a list of formatted 'targets': a comma separated list of targets ready to insert in a template. Args: repository_ctx: the repository_ctx object. targets: a list of supported targets. Returns: A formatted list of targets. """ fmt_targets = [] for target in targets: fmt_targets.append(' "' + target + '",') return "\n".join(fmt_targets)
def hess_binary(n, oracle, fast=False, cycles=1, target=0, seq=None):
    """
    HESS Algorithm is a Universal Black Box Optimizer (binary version).
    :param n: The size of bit vector.
    :param oracle: The oracle, this output a number and input a bit vector.
    :param fast: More fast some times less accuracy.
    :param cycles: How many times the HESS algorithm is executed.
    :param target: Any value less than this terminates the execution.
    :param seq: External sequence if not set default sequence is used (1..n)
    :return optimized sequence.
    """
    import hashlib
    # Tabu memory: SHA-256 digests of (i, xs, j) states already visited.
    # NOTE(review): membership test on a sorted list is O(n) per lookup and
    # the sort adds more -- a set would behave identically; left as-is.
    db = []
    if seq is not None:
        xs = seq
    else:
        xs = [False] * n
    # glb: current best (global) oracle value this cycle; +1 so the very
    # first evaluation always counts as an improvement.
    glb = oracle(xs) + 1
    opt = xs[:]

    def __inv(i, j, xs):
        # Local move: if bits i and j are equal, flip bit i; otherwise
        # swap-and-flip (xs[i] becomes NOT xs[j], xs[j] gets old xs[i]).
        # Applying it twice with the same (i, j) undoes the move.
        if xs[i] == xs[j]:
            xs[i] = not xs[j]
        else:
            aux = xs[i]
            xs[i] = not xs[j]
            xs[j] = aux

    top = glb
    for _ in range(cycles):
        glb = top + 1
        if fast:
            # Fast variant: only the upper-triangular (i < j) pairs.
            while True:
                anchor = glb
                for i in range(len(xs) - 1):
                    for j in range(i + 1, len(xs)):
                        # State fingerprint; bools in xs serialize as 0/1 bytes.
                        key = hashlib.sha256(bytes([min(i, j)] + xs + [max(i, j)])).hexdigest()
                        if key not in db:
                            db.append(key)
                            db.sort()
                        else:
                            continue  # already explored this state
                        __inv(min(i, j), max(i, j), xs)
                        loc = oracle(xs)
                        if loc < glb:
                            glb = loc
                            if glb < top:
                                top = glb
                                opt = xs[:]
                                if top <= target:
                                    return opt
                        elif loc > glb:
                            # Worse move: undo it (the move is an involution).
                            __inv(min(i, j), max(i, j), xs)
                if anchor == glb:
                    break  # no improvement in a full sweep: local optimum
        else:
            # Thorough variant: all (i, j) pairs including i == j and i > j.
            while True:
                anchor = glb
                for i in range(len(xs)):
                    for j in range(len(xs)):
                        key = hashlib.sha256(bytes([min(i, j)] + xs + [max(i, j)])).hexdigest()
                        if key not in db:
                            db.append(key)
                            db.sort()
                        else:
                            continue
                        __inv(min(i, j), max(i, j), xs)
                        loc = oracle(xs)
                        if loc < glb:
                            glb = loc
                            if glb < top:
                                top = glb
                                opt = xs[:]
                                if top <= target:
                                    return opt
                        elif loc > glb:
                            __inv(min(i, j), max(i, j), xs)
                if anchor == glb:
                    break
    return opt
def persistence(num: int) -> int:
    """Return the multiplicative persistence of a number: how many times its
    digits must be multiplied together until a single digit remains.

    :param num: the number to calculate the persistence of

    Example:
        >>> persistence(1234)
        2
    """
    # Docstring fixed: the original doctest put the expected output on a
    # ">>>" line and duplicated the :param: entry.
    digits = str(num)
    steps: int = 0
    while len(digits) > 1:
        product = 1
        for d in digits:
            product *= int(d)
        digits = str(product)
        steps += 1
    return steps
def _UTMLetterDesignator(Lat): """This routine determines the correct UTM letter designator for the given latitude returns 'Z' if latitude is outside the UTM limits of 84N to 80S Written by Chuck Gantz- chuck.gantz@globalstar.com""" if 84 >= Lat >= 72: return 'X' elif 72 > Lat >= 64: return 'W' elif 64 > Lat >= 56: return 'V' elif 56 > Lat >= 48: return 'U' elif 48 > Lat >= 40: return 'T' elif 40 > Lat >= 32: return 'S' elif 32 > Lat >= 24: return 'R' elif 24 > Lat >= 16: return 'Q' elif 16 > Lat >= 8: return 'P' elif 8 > Lat >= 0: return 'N' elif 0 > Lat >= -8: return 'M' elif -8> Lat >= -16: return 'L' elif -16 > Lat >= -24: return 'K' elif -24 > Lat >= -32: return 'J' elif -32 > Lat >= -40: return 'H' elif -40 > Lat >= -48: return 'G' elif -48 > Lat >= -56: return 'F' elif -56 > Lat >= -64: return 'E' elif -64 > Lat >= -72: return 'D' elif -72 > Lat >= -80: return 'C' else: return 'Z' # if the Latitude is outside the UTM limits
def _handleResponse(results, assure_address=True): """Process the results from a Google Maps API callself. Heuristically (by visual inspection), approximate addresses tend to resolve better to buildings than geographic centers. So if we are willing to relax address constraints, then filter on that type of result when possible. """ if len(results) != 1: if assure_address: return None # Filter out only the approximate addresses, as they tend to be # what we want temp_results = [result for result in results if ( result['geometry']['location_type'] == 'APPROXIMATE' )] # But if none of these are approximate results # (e.g. GEOGRAPHIC_CENTER), then ignore the filtering above results = temp_results or results # Pick the first one as we will trust Google's (assumed) relevance ordering return results[0]
def full_method(apple, pear, banana=9, *args, **kwargs):
    """methody docstring: echo back every kind of argument received."""
    received = (apple, pear, banana, args, kwargs)
    return received
def product_and_sum(a, b):
    """Return the (product, sum) tuple of two numbers."""
    product = a * b
    total = a + b
    return (product, total)
def map_cap_to_opnames(instructions):
    """Map capabilities to the instructions they enable.

    Arguments:
      - instructions: a list containing a subset of SPIR-V instructions'
        grammar (dicts with 'opname' and optionally 'capabilities').

    Returns:
      - A dict mapping each capability to the list of opnames it enables;
        instructions with no 'capabilities' key fall under '0_core_0'.
    """
    cap_to_inst = {}
    for inst in instructions:
        # dict.get only defaults when the key is absent, matching the
        # original "'capabilities' in inst" check exactly.
        for cap in inst.get('capabilities', ['0_core_0']):
            cap_to_inst.setdefault(cap, []).append(inst['opname'])
    return cap_to_inst
def php_array_search(_needle, _haystack, _strict=False):
    """PHP-style array_search: return the first key in ``_haystack`` (a
    mapping) whose value equals ``_needle``, or False when no value matches.

    Note: ``_strict`` is accepted for API parity but unused (Python ``==``
    is already type-sensitive for most values).
    """
    # Single pass replaces the original's redundant O(n) scan of .values()
    # followed by a second loop over .items().
    for key, value in _haystack.items():
        if value == _needle:
            return key
    return False
def expandInSubstr(t, args=None):
    """Expand all occurrences of a macro bounded by %item% with a value from
    the dict passed via ``args``.

    Fixes over the original:
      - ``args.has_key(...)`` (Python 2 only) replaced with ``in``.
      - unmatched '%' or an unknown macro no longer causes an infinite loop;
        scanning now advances past it.
      - mutable default argument replaced by the None sentinel.
    """
    if args is None:
        args = {}
    pos = 0
    while True:
        start = t.find('%', pos)
        if start == -1:
            break  # no more '%' markers
        end = t.find('%', start + 1)
        if end == -1:
            break  # unmatched opening '%': nothing more to expand
        name = t[start + 1:end]
        if name in args:
            t = t.replace('%' + name + '%', args[name])
            # Re-scan from the substitution point (replacements are global).
            pos = start
        else:
            # Unknown macro: step past this '%' so we make progress.
            pos = start + 1
    return t
def add_with_saturation(a: int, b: int, *, high_water_mark: int = 256):
    """Addition with a maximum threshold.

    Parameters
    ----------
    a : int
    b : int
        Numbers to add together.
    high_water_mark : int, default 256
        Maximum value returned by the addition.

    Returns
    -------
    int
        Sum of ``a`` and ``b``, clamped to at most ``high_water_mark``.

    Raises
    ------
    ValueError
        If ``high_water_mark`` is negative.  (Docstring fixed: it previously
        claimed the threshold was 10, which the code never checked.)

    See Also
    --------
    :func:`add_with_saturation_bad`
    """
    if high_water_mark < 0:
        raise ValueError('High water mark too low')
    return min(a + b, high_water_mark)
def snake_to_camel(name: str) -> str:
    """Convert a snake_case string to an upper-camel-case (PascalCase) string."""
    return name.title().replace('_', '')
def wave_array(arr):
    """Rearrange ``arr`` in place into a wave and return it.

    After the pass, elements satisfy a[0] >= a[1] <= a[2] >= a[3] <= a[4] ...
    Each even index is made a local maximum by swapping with its neighbors.
    """
    for i in range(0, len(arr), 2):
        # Ensure the even element dominates its left neighbor (skip index 0).
        if i and arr[i] < arr[i - 1]:
            arr[i - 1], arr[i] = arr[i], arr[i - 1]
        # Ensure the even element dominates its right neighbor, if any.
        if i + 1 < len(arr) and arr[i] < arr[i + 1]:
            arr[i + 1], arr[i] = arr[i], arr[i + 1]
    return arr
def emd_function_value(pixel_group, base):
    """Calculate the EMD f value of the given pixel group.

    The f value is a weighted sum of the pixel values (weights 1..n by
    position) modulo the chosen base.
    """
    weighted_sum = sum(pixel * (i + 1) for i, pixel in enumerate(pixel_group))
    # Taking the modulus once at the end equals taking it at every step.
    return weighted_sum % base
def find_column_types(orig_metadata, synth_method, categorical_types):
    """Split the .json metadata columns into categorical and numerical names.

    Categorical columns are those whose type appears in ``categorical_types``;
    when ``synth_method`` is 'sgf' every column is categorical (sgf works only
    with categorical features).
    """
    categorical_features = []
    numeric_features = []
    for col in orig_metadata['columns']:
        name = col["name"]
        # Short-circuit keeps sgf from ever touching col['type'].
        if synth_method == 'sgf' or col['type'] in categorical_types:
            categorical_features.append(name)
        else:
            numeric_features.append(name)
    return categorical_features, numeric_features
def stringify_datetime_types(data: dict):
    """Convert the 'date' and 'timestamp' entries to ISO-8601 strings.

    Mutates ``data`` in place and returns it; absent keys are left alone.
    """
    for key in ("date", "timestamp"):
        try:
            value = data[key]
        except KeyError:
            continue
        data[key] = value.isoformat()
    return data
def make_a_list_from_uncommon_items(list1, list2):
    """Return items of the longer list that are absent from the shorter one.

    Note: this is a ONE-SIDED set difference taken from whichever list is
    longer (ties favor list1), not a symmetric difference; element order is
    not preserved.  On any exception the exception object itself is
    returned instead of being raised.

    :param list1: list
    :param list2: list
    :return: list (or the caught Exception instance)
    """
    try:
        if len(list1) >= len(list2):
            longer, shorter = list1, list2
        else:
            longer, shorter = list2, list1
        return list(set(longer) - set(shorter))
    except Exception as error:
        return error
def parse_request_parameters(filter_args):
    """Build session filter params from the client's request arguments.

    Pagination keys ('limit', 'next') are excluded; every other value is
    split on commas (spaces removed) into a set.
    """
    return {
        key: set(value.replace(" ", "").split(","))
        for key, value in filter_args.items()
        if key not in ("limit", "next")
    }
def convert_direction_to_north_or_west(distance_moved, direction):
    """Normalize S to N and E to W by negating the distance.

    N and W (and any other value) pass through unchanged.
    """
    if direction == "S":
        return -distance_moved, "N"
    if direction == "E":
        return -distance_moved, "W"
    return distance_moved, direction
def check(line, queries):
    """Check whether at least one of ``queries`` occurs in the line.

    Tokens are formed by replacing dots with spaces and splitting.  Returns
    (matches, tab-split fields) on a hit, otherwise (empty set, False).
    """
    stripped = line.strip()
    tokens = stripped.replace('.', ' ').split()
    matches = set(tokens).intersection(queries)
    if matches:
        return matches, stripped.split('\t')
    return matches, False
def karatsuba(x: int, y: int) -> int:
    """Multiply two integers using the Karatsuba algorithm.

    Args:
        x (int): the first integer to be multiplied
        y (int): the second integer to be multiplied

    Returns:
        int: the product of x and y
    """
    # Base case: single-digit operands multiply directly.
    if x < 10 or y < 10:
        return x * y
    # Split both numbers at half the longer decimal length; using
    # half * 2 below handles odd digit counts correctly.
    half = max(len(str(x)), len(str(y))) // 2
    power = 10 ** half
    a, b = divmod(x, power)   # x = a * power + b
    c, d = divmod(y, power)   # y = c * power + d
    # x*y = power^2 * ac + power * (ad + bc) + bd, with ad + bc obtained
    # from one extra recursive product: (a+b)(c+d) - ac - bd.
    ac = karatsuba(a, c)
    bd = karatsuba(b, d)
    middle = karatsuba(a + b, c + d) - ac - bd
    return ac * power * power + middle * power + bd
def lss(inlist):
    """Return the sum of squares of the values in the passed list.

    Usage: lss(inlist)
    """
    return sum(item * item for item in inlist)
def get_milliseconds(time_sec: float) -> int:
    """Convert a time in seconds (float) to milliseconds (int).

    Args:
        time_sec: the time in seconds; must be a float (AssertionError otherwise).

    Returns:
        The time in milliseconds, rounded to the nearest integer.
    """
    assert isinstance(time_sec, float)
    milliseconds = round(time_sec * 1000)
    return int(milliseconds)
def is_ecalendar_crse(course_code):
    """Check whether a course code matches the eCalendar format, e.g.
    COMP-202 (cccc-xxx or cccc-xxxcx).

    Rules (docstring corrected to match the code, which requires the dash
    at index 4, i.e. a four-character prefix):
      - at least 7 characters in length
      - two sections separated by '-'
      - first section (4 chars) is letters
      - second is alphanumeric, with its first 3 characters digits
    """
    code = str(course_code)
    if len(code) < 7 or code[4] != '-':
        return False
    code = code.split('-')
    return code[0].isalpha() and code[1].isalnum() and code[1][:3].isdigit()
def safe_divide_list(x_list, y_list):
    """Divide x_list by y_list element-wise, mapping divide-by-zero to 0.0.

    :param x_list: list of numerator values
    :param y_list: list of denominator values
    :return: list of quotients, with 0.0 wherever the denominator is zero.
    """
    return [x / y if y != 0.0 else 0.0 for x, y in zip(x_list, y_list)]
def onehot_encode(position: int, count: int) -> list:
    """One-hot encode a 1-based position.

    Args:
        position (int): 1-based entry to set to 1 (position=1 sets index 0;
            note position=0 would wrap around and set the last slot).
        count (int): total number of entries.

    Returns:
        list: ``count`` zeroes with a single 1 at index ``position - 1``.
    """
    encoded = [0 for _ in range(count)]
    encoded[position - 1] = 1
    return encoded
def is_complete(board):
    """Return whether a bingo board (2D array of ints, rectangular) is
    complete: some row or column has every number marked (negative)."""
    if any(all(cell < 0 for cell in row) for row in board):
        return True  # a fully-marked row
    # zip(*board) transposes the (rectangular) board into its columns.
    return any(all(cell < 0 for cell in column) for column in zip(*board))
def selection_sort(alist):
    """Sort a list in place using selection sort and return it.

    Each pass finds the smallest value in the unsorted suffix and swaps it
    into position.  Worst case: O(n^2).

    Examples

      selection_sort([4,7,8,3,2,9,1])
      # => [1,2,3,4,7,8,9]
    """
    for boundary in range(len(alist)):
        # Index of the smallest element in the unsorted suffix; min() keeps
        # the first index on ties, matching strict-< scanning.
        smallest = min(range(boundary, len(alist)), key=alist.__getitem__)
        alist[boundary], alist[smallest] = alist[smallest], alist[boundary]
    return alist
def get_tuple_version(hexversion):
    """Decode a compact hex version 0xMMmmpp into a (major, minor, patch) tuple."""
    major = (hexversion >> 16) & 0xff
    minor = (hexversion >> 8) & 0xff
    patch = hexversion & 0xff
    return major, minor, patch
def parse_property_string(prop_str):
    """Generate a valid property string for extended xyz files.
    (ref. https://libatoms.github.io/QUIP/io.html#extendedxyz)

    Args:
        prop_str (str): valid property string, or appendix of property string

    Returns:
        str: valid property string
    """
    prefix = "Properties="
    if prop_str.startswith(prefix):
        return prop_str
    return prefix + "species:S:1:pos:R:3:" + prop_str
def disgustingworkaroundforpossiblebug(p):
    """this is the solution for weird interleaving effects

    p is procpar dictionary
    returns boolean

    use when sliceorder is 1 but is not actually interleaved;
    rearrange slices in kmake when false
    """
    def _evenslices(p):
        # NOTE: prints the slice count as a side effect on every call.
        print('slices {}'.format(p['ns']))
        return (int(p['ns']) % 2 == 0)
    # this means interleaved
    if int(p['sliceorder'])==1:
        # switch for sequnces
        #if p['pslabel'][:4] == 'gems':
        # switch for seqcon
        if p['seqcon'] == 'nccnn':
            # switch for even-odd slices
            if _evenslices(p):
                # switch for orientation
                # NOTE(review): the even-slice list contains 'trans90' where
                # the odd-slice list below contains 'trans' -- looks
                # deliberate, but worth confirming against the scanner docs.
                if p['orient'] in ['sag','sag90','trans90','cor','cor90']:
                    return True
                else:
                    return False
            else:
                # switch for orientation
                if p['orient'] in ['sag','sag90','trans','cor','cor90']:
                    return True
                else:
                    return False
        # seqcon other than 'nccnn': treat as not actually interleaved.
        return False
    # this means no interleave
    else:
        return False
def total_plane_strain_R_disp(r, p, ri, ro, E, nu, dT, alpha):
    """
    Constants of integration (stress) assume a linear temperature gradient
    and constraint of plane strain.

    Parameters (NOTE(review): inferred from usage -- confirm units/meaning):
      r     -- radial coordinate at which the displacement is evaluated
      p     -- internal pressure
      ri,ro -- inner and outer radii of the cylinder
      E,nu  -- Young's modulus and Poisson's ratio
      dT    -- temperature difference across the wall (linear gradient)
      alpha -- thermal expansion coefficient
    Returns the radial displacement u(r).
    """
    # Lame-type integration constants for the pressure loading.
    A = ri**2 * ro**2 * -p / (ro**2 - ri**2)
    C = p * ri**2 / (ro**2 - ri**2)
    # Thermal integration constants from the assumed linear gradient.
    C_1 = -alpha*dT*(nu + 1)*(2*nu - 1)*\
        (-ri**3/6 + ri*ro**2/2 - ro**3/3)/((nu - 1)*(ri - ro)**2*(ri + ro))
    C_2 = alpha*dT*ri**2*(nu + 1)*\
        (-ri**3/6 + ri*ro**2/2 - ro**3/3)/((nu - 1)*(ri - ro)**2*(ri + ro))
    # C_3 = 0 makes its term vanish; kept so the general solution is visible.
    C_3 = 0
    # Radial displacement: pressure part + homogeneous part (C_1 r + C_2/r
    # + C_3 term) + thermal particular solution.
    u = (-A*nu - A + C*r**2*(-2*nu**2 - nu + 1))/(E*r) + \
        C_1*r + C_2/r + C_3*nu*r/E + alpha*(nu + 1)*\
        (-dT*r**3/(3*ri - 3*ro) + dT*r**2*ri/(2*ri - 2*ro) + \
        dT*ri**3/(3*ri - 3*ro) - dT*ri**3/(2*ri - 2*ro))/(r*(1 - nu))
    return u
def get_max_temp(liveness, args):
    """Return the sum over tiles of the maximum temporary-variable memory
    usage, read from the liveness report.  ``args`` is unused but kept for
    signature compatibility."""
    per_tile_max = liveness["notAlwaysLive"]["maxBytesByTile"]
    return sum(per_tile_max)
def can_embed(bin_msg: str, width: int, height: int) -> bool:
    """Determine whether an image can hold the message.

    Parameters:
    -----------
    bin_msg: string of 1's and 0's representing the characters in the msg.
    width: the width of the image.
    height: the height of the image.

    Returns:
    --------
    bool: whether the message (plus the 8-bit terminator) fits in the image.
    """
    capacity = 3 * width * height
    required = len(bin_msg) + 8  # + 8 for terminating 00000000
    return required < capacity
def decompress_database(database):
    """Undo the compression: expand each result's packed parameter CSV back
    into a {"parameters": {...}, "time": ...} dict.  Mutates the sections in
    place and returns the database."""
    for section in database["sections"]:
        names = section["parameter_names"]
        section["results"] = [
            {
                "parameters": {
                    name: int(value)
                    for name, value in zip(names, result[0].split(","))
                },
                "time": result[1],
            }
            for result in section["results"]
        ]
    return database
def remove_first(generator, default=None):
    """Return the first item of the iterable, or ``default`` when the
    iterable is falsy or exhausted.

    Note: this works only once for each generator, because it consumes the
    returned item from it.
    """
    if not generator:
        return default
    return next(iter(generator), default)
def _addsum(arr): """Compute the XOR checksum value. Parameters ---------- arr : bytes, bytearray, or list of int Bytes to be addsum. Returns ------- addsum: int the checksum byte as integer. """ if not isinstance(arr, (bytes, bytearray, list)): raise TypeError("invalid type of arr:", type(arr)) if len(arr) == 0: raise ValueError("empty array") arr = list(arr) addsum = arr[0] for byte in arr[1:]: addsum ^= byte return addsum
def nondimensionalise(rc, qc, rho, x, nature):
    """Nondimensionalise a parameter using the characteristic quantities.

    Arguments
    ---------
    rc : float     -- characteristic radius (length)
    qc : float     -- characteristic flow
    rho : float    -- density of blood
    x : float      -- parameter to nondimensionalise
    nature : str   -- one of 'time', 'area', 'flow', 'pressure'; any other
                      value returns x unchanged

    Returns
    -------
    float -- dimensionless quantity
    """
    # Early returns replace the if/elif chain; arithmetic order is kept
    # identical so floating-point results match exactly.
    if nature == 'time':
        return x * qc / rc**3
    if nature == 'area':
        return x / rc**2
    if nature == 'flow':
        return x / qc
    if nature == 'pressure':
        return x * rc**4 / rho / qc**2
    return x
def patch_version(apiLevel):
    """Generate a JSON patch (as a string) for a state fixture, setting
    device.apiLevel.

    NOTE(review): the exact whitespace inside the returned literal matters
    if fixtures are compared byte-for-byte -- confirm against the fixture
    files before reformatting.
    """
    return f"""{{ "device": {{ "apiLevel": {apiLevel} }} }}"""
def translate_key(key):
    """Return the correct configuration key for a lowercase alias; unknown
    keys are returned unchanged.  Always returns a string."""
    mapping = {
        'user': 'User',
        'identityfile': 'IdentityFile',
        'proxycommand': 'ProxyCommand',
        'ip': 'Hostname',
        'hostname': 'Hostname',
        'port': 'Port',
    }
    return str(mapping.get(key, key))
def prune_empty(items):
    """Remove falsy items (None, '', 0, empty containers, ...) from a list.

    Docstring corrected: ``filter(None, ...)`` drops every falsy value, not
    just None.
    """
    return list(filter(None, items))
def build_arbitration_id(msg_type, source_id, msg_id):
    """Pack a CAN arbitration id.

    Layout (matches the C union below):
        bits 0-3  : source_id (4 bits)
        bit  4    : msg_type  (1 bit)
        bits 5-10 : msg_id    (6 bits)

    typedef union CanId {
        uint16_t raw;
        struct {
            uint16_t source_id : 4;
            uint16_t type : 1;
            uint16_t msg_id : 6;
        };
    } CanId;
    """
    packed = source_id & 0xF
    packed |= (msg_type & 0x1) << 4
    packed |= (msg_id & 0x3F) << 5
    return packed
def gibbs(dH, dS, temp=37):
    """Calculate Gibbs free energy in cal/mol from enthalpy, entropy and
    temperature.

    Arguments:
    dH   -- enthalpy in kcal/mol
    dS   -- entropy in cal/(mol * Kelvin)
    temp -- temperature in Celsius (default 37 degrees C)
    """
    # dG = dH - T*dS, with dH converted kcal -> cal and T converted to K.
    return dH * 1000 - (temp + 273.15) * dS
def create_vocab(docs):
    """Create a word -> index vocabulary for a set of tokenized documents.

    Indices are assigned deterministically in first-seen order.  (The
    original collected words into a set and enumerated it, so the mapping
    varied from run to run under hash randomization.)
    """
    vocab = {}
    for doc in docs:
        for word in doc:
            if word not in vocab:
                vocab[word] = len(vocab)
    return vocab
def _call_strace(self, *args, **kwargs): """ Top level function call for Strace that can be run in parallel """ return self(*args, **kwargs)
def sum_var_positional_args(a, b, *args):
    """Add ``a``, ``b`` and any extra positional arguments together.

    The accumulator loop (rather than the built-in ``sum``) keeps support
    for any type implementing ``+``, e.g. strings.
    """
    # Renamed the accumulator: the original shadowed the builtin `sum`.
    total = a + b
    for extra in args:
        total += extra
    return total
def str_to_bool(string):
    # type (str) -> bool
    """Convert a string to a boolean value.

    Args:
        string (str): input string.

    Returns:
        bool: True for "True"/"true", False for "False"/"false".

    Raises:
        ValueError: if the input is none of True, true, False, false.
    """
    truthy = ("True", "true")
    falsy = ("False", "false")
    if string in truthy:
        return True
    if string in falsy:
        return False
    raise ValueError("Cannot convert {} to bool".format(string))
def add_name_combiner(combiner, name):
    """Prefix each un-scoped combiner field with the node's name.

    Fields already containing a '.' are assumed scoped and left unchanged.
    """
    return [
        field if "." in field else "{}.{}".format(name, field)
        for field in combiner
    ]
def assert_keys_in_form_exist(form, keys):
    """Check that every key in ``keys`` is present in the ``form`` mapping.

    :param form: object expected to be a dict (None or non-dicts fail)
    :param keys: required keys
    :return: True if form is a dict containing all the keys, else False.
    """
    # isinstance replaces the exact type() comparison (also accepting dict
    # subclasses) and subsumes the separate None check.
    if not isinstance(form, dict):
        return False
    return all(key in form for key in keys)
def _digit_to_alpha_num(digit, base=52): """Convert digit to base-n.""" base_values = { 26: {j: chr(j + 65) for j in range(0, 26)}, 52: {j: chr(j + 65) if j < 26 else chr(j + 71) for j in range(0, 52)}, 62: {j: chr(j + 55) if j < 36 else chr(j + 61) for j in range(10, 62)}, } if base not in base_values: raise ValueError( f"Base-{base} system is not supported. Supported bases are: " f"{list(base_values.keys())}" ) return base_values[base].get(digit, digit)
def has_prefix(sub_s, dic_list):
    """Return the dictionary words starting with ``sub_s``, or False if none.

    :param sub_s: (str) substring constructed by neighboring letters on a
        4x4 square grid
    :param dic_list: iterable of candidate words
    :return: list of matching words when any exist, otherwise False
        (docstring corrected: the original claimed a bool return)
    """
    matches = [word for word in dic_list if word.startswith(sub_s)]
    # Falsy empty list becomes False, matching the original's contract.
    return matches or False
def _parse_range_value(range_value): """Parses a single range value from a Range header. Parses strings of the form "0-0", "0-", "0" and "-1" into (start, end) tuples, respectively, (0, 0), (0, None), (0, None), (-1, None). Args: range_value: A str containing a single range of a Range header. Returns: A tuple containing (start, end) where end is None if the range only has a start value. Raises: ValueError: If range_value is not a valid range. """ end = None if range_value.startswith('-'): start = int(range_value) if start == 0: raise ValueError('-0 is not a valid range.') else: split_range = range_value.split('-', 1) start = int(split_range[0]) if len(split_range) > 1 and split_range[1].strip(): end = int(split_range[1]) if start > end: raise ValueError('start must be <= end.') return (start, end)
def format_hex(text: str) -> str:
    """
    Uppercase the digits of a hexadecimal string like "0x12b3",
    leaving the two-character "0x" prefix untouched.
    """
    prefix = text[:2]
    digits = text[2:]
    return prefix + digits.upper()
def fix_filename(urlTitle):
    """Collapse double slashes in the DOU article 'urlTitle' so the
    result can be used as part of a filename.
    """
    return urlTitle.replace('//', '/')
def tanh_backward(dout, cache):
    """
    Computes the backward pass for a layer of tanh units.

    Input:
    - dout: Upstream derivatives, of any shape
    - cache: tanh(x) saved by the forward pass, of same shape as dout

    Returns:
    - dx: Gradient with respect to x
    """
    tanh_x = cache
    # d/dx tanh(x) = 1 - tanh(x)^2, applied elementwise via the chain rule.
    local_grad = 1 - tanh_x ** 2
    return local_grad * dout
def gcd(lat1, lon1, lat2, lon2, earth_radius=6367):
    """
    Great-circle (haversine) distance between two points on the earth.

    All (lat, lon) coordinates must have numeric dtypes, already be in
    radians, and be of equal length (scalars or arrays).
    """
    import numpy as np
    half_dlat = (lat2 - lat1) / 2.0
    half_dlon = (lon2 - lon1) / 2.0
    # Haversine formula: hav = sin^2(dlat/2) + cos(lat1)cos(lat2)sin^2(dlon/2)
    hav = np.sin(half_dlat) ** 2 \
        + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon) ** 2
    return earth_radius * 2 * np.arcsin(np.sqrt(hav))
def __getTriangleCentroid(triangle):
    """
    Returns the centroid of a triangle in 3D-space: the per-axis
    average of its three vertices.
    """
    vertex_a, vertex_b, vertex_c = triangle[0], triangle[1], triangle[2]
    # zip groups the xs, ys, and zs of the three vertices.
    return tuple(
        (a + b + c) / 3.0 for a, b, c in zip(vertex_a, vertex_b, vertex_c)
    )
def cyl_inside_box(s1, v2):
    """Return True if cylinder s1 lies strictly inside box v2.

    s1 has a "center" (x, y) and a "radius"; v2 has corners "p1"
    (lower) and "p2" (upper).
    """
    cx = s1["center"][0]
    cy = s1["center"][1]
    radius = s1["radius"]
    # The full extent of the cylinder must fall between the box corners
    # on both axes (strict inequalities: touching the wall fails).
    inside_y = v2["p1"][1] < cy - radius and cy + radius < v2["p2"][1]
    inside_x = v2["p1"][0] < cx - radius and cx + radius < v2["p2"][0]
    return inside_y and inside_x
def map_category(category):
    """
    Translate between Monarch category names and biolink model terms.

    Monarch's categories don't perfectly map onto the biolink model
    https://github.com/biolink/biolink-model/issues/62
    Unknown categories are returned unchanged.
    """
    translations = {
        'variant': 'sequence variant',
        'phenotype': 'phenotypic feature',
        'sequence variant': 'variant',
        'phenotypic feature': 'phenotype',
        # 'model' : 'model to disease mixin'
    }
    return translations.get(category.lower(), category)
def isLiteralValue(symbol, T=()):
    """Check whether ``symbol`` is a quoted string literal.

    A literal starts and ends with the same quote character (' or ").
    When a terminal alphabet ``T`` is given, every character between
    the quotes must belong to ``T``.

    >>> isLiteralValue("'123'")
    True
    >>> isLiteralValue("'123")
    False
    >>> isLiteralValue("abc")
    False
    >>> isLiteralValue("'abc'", ["a", "b", "c"])
    True
    >>> isLiteralValue("'abc'", ["a", "b"])
    False
    >>> isLiteralValue('0')
    False
    >>> isLiteralValue("'")
    False
    """
    # A lone quote (or empty string) cannot both open and close a
    # literal; the previous implementation accepted "'" and crashed
    # on "".
    if len(symbol) < 2:
        return False
    if symbol[0] != symbol[-1]:
        return False
    if symbol[0] not in ("'", '"'):
        return False
    # With an alphabet, every inner character must be drawn from it.
    if T and not set(symbol[1:-1]) <= set(T):
        return False
    return True
def clip_to_boundary(bbox, canvas_shape):
    """Clamp bbox coordinates to lie within a 2D canvas.

    Args:
        bbox: (ymin, xmin, ymax, xmax) coordinates.
        canvas_shape: (height, width) of the canvas.

    Returns:
        The clipped (ymin, xmin, ymax, xmax) tuple.

    Raises:
        AssertionError: if canvas_shape is not 2D, or the clipped box
            is 1 pixel or smaller in either dimension.
    """
    ymin, xmin, ymax, xmax = bbox
    assert len(canvas_shape) == 2, 'canvas shape {} is not 2D!'.format(canvas_shape)
    height, width = canvas_shape
    clipped_ymin = max(0, ymin)
    clipped_xmin = max(0, xmin)
    clipped_ymax = min(height, ymax)
    clipped_xmax = min(width, xmax)
    assert clipped_ymax - clipped_ymin > 1 and clipped_xmax - clipped_xmin > 1, \
        'Bbox too small, invalid crop!'
    return (clipped_ymin, clipped_xmin, clipped_ymax, clipped_xmax)
def get_dimensions(model_dict):
    """Extract the dimensions of the model.

    Args:
        model_dict (dict): The model specification. See: :ref:`model_specs`

    Returns:
        dict: Dimensional information like n_states, n_periods, n_controls,
            n_mixtures. See :ref:`dimensions`.
    """
    factors = model_dict["factors"]
    periods_per_factor = [len(spec["measurements"]) for spec in factors.values()]
    return {
        "n_states": len(factors),
        "n_periods": max(periods_per_factor),
        # plus 1 for the constant
        "n_controls": 1 + len(model_dict.get("controls", [])),
        "n_mixtures": model_dict["estimation_options"].get("n_mixtures", 1),
    }
def is_core_cs(csname):
    """
    If 'csname' is a core C-state name, returns 'True'. Returns 'False'
    otherwise (even if 'csname' is not a valid C-state name).
    """
    # Core C-state names look like "CC1", "CC6": the "CC" prefix plus at
    # least one more character.
    return len(csname) > 2 and csname.startswith("CC")
def compute_sea_level(altitude: float, atmospheric: float) -> float:
    """
    Calculates the pressure at sea level (in hPa) from the specified
    altitude (in meters) and atmospheric pressure (in hPa).

    Equation taken from BMP180 datasheet (page 17):
    http://www.adafruit.com/datasheets/BST-BMP180-DS000-09.pdf

    Args:
        altitude : Altitude in meters
        atmospheric : Atmospheric pressure in hPa
    Return:
        float The approximate sea-level pressure
    """
    scale_factor = (1.0 - altitude / 44330.0) ** 5.255
    return atmospheric / scale_factor
def get_full_link(link):
    """ Add domain to site-relative links; return others unchanged.

    :param link: URL or site-relative path (str)
    :return: absolute URL
    """
    domain = 'https://www.camara.leg.br'
    # startswith() replaces the old `link[4] != 'http'` check, which
    # compared a single character to a 4-char string (always True) and
    # raised IndexError for links shorter than 5 characters.
    if link.startswith('/') and not link.startswith('http'):
        return domain + link
    return link
def roms_varlist(option):
    """
    varlist = roms_varlist(option)
    Return the list of ROMS variable names for a named group.
    """
    catalogue = {
        'physics': ['temp', 'salt', 'u', 'v', 'ubar', 'vbar', 'zeta'],
        'physics2d': ['ubar', 'vbar', 'zeta'],
        'physics3d': ['temp', 'salt', 'u', 'v'],
        'mixing3d': ['AKv', 'AKt', 'AKs'],
        's-param': ['theta_s', 'theta_b', 'Tcline', 'hc'],
        's-coord': ['s_rho', 's_w', 'Cs_r', 'Cs_w'],
        'coord': ['lon_rho', 'lat_rho', 'lon_u', 'lat_u', 'lon_v', 'lat_v'],
        'grid': ['h', 'f', 'pm', 'pn', 'angle', 'lon_rho', 'lat_rho',
                 'lon_u', 'lat_u', 'lon_v', 'lat_v', 'lon_psi', 'lat_psi',
                 'mask_rho', 'mask_u', 'mask_v', 'mask_psi'],
        'hgrid': ['f', 'dx', 'dy', 'angle_rho', 'lon_rho', 'lat_rho',
                  'lon_u', 'lat_u', 'lon_v', 'lat_v', 'lon_psi', 'lat_psi',
                  'mask_rho', 'mask_u', 'mask_v', 'mask_psi'],
        'vgrid': ['h', 's_rho', 's_w', 'Cs_r', 'Cs_w',
                  'theta_s', 'theta_b', 'Tcline', 'hc'],
    }
    if option not in catalogue:
        raise Warning('Unknow varlist id')
    return catalogue[option]
def comma_code(a_list):
    """
    Turns a list into a string of elements separated by commas, with
    "and" prefixed to the last element.

    :param a_list: a list of strings (left unmodified)
    :return: a string separated by commas; '' for an empty list
    """
    # Previously this raised IndexError on [] — return '' instead.
    if not a_list:
        return ''
    # Work on a copy so the caller's list is not mutated.
    items = list(a_list)
    items[-1] = 'and ' + items[-1]
    return ", ".join(items)
def beta_from_gamma(gamma):
    """Return exponent beta for the (integrated) propagator decay
    G(lag) = lag**-beta that compensates a sign-autocorrelation
    C(lag) = lag**-gamma.
    """
    return 0.5 * (1 - gamma)
def fix_spio_issues(mcn_net):
    """Normalize nesting of data loaded with scipy.io.loadmat.

    loadmat does not always produce consistent array dimensions: the
    level of nesting for the final element of a structure array can
    differ from the others.  Wrap any bare (non-list) element of
    mcn_net['layers']['inputs'/'outputs'] in a single-item list so every
    entry is a list.  The lists are fixed in place and mcn_net returned.
    """
    for field in ('inputs', 'outputs'):
        entries = mcn_net['layers'][field]
        for position, entry in enumerate(entries):
            if not isinstance(entry, list):
                entries[position] = [entry]
    return mcn_net
def update_config(config, config_update):
    """Overwrite top-level keys of config with those from config_update.

    Operates at a single layer of depth only (no recursive merge).

    :config: Configuration dict, mutated in place
    :config_update: Updates to configuration
    :returns: config
    """
    config.update(config_update)
    return config
def velocity(estimate, actual, times=60):
    """Calculate velocity as (estimate * times) / actual.

    >>> velocity(2, 160, times=60)
    0.75
    >>> velocity(3, 160, times=60)
    1.125
    >>> velocity(3, 160, times=80)
    1.5
    """
    scaled_estimate = estimate * times
    return scaled_estimate / float(actual)
def draw_hierarchy(dict, point):
    """
    Build the success path (as a stack) from ``point`` through the
    mapping of locations.

    :param dict: dictionary mapping a location string "x,y" to its
        successor location string
    :param point: starting location (iterable of coordinates)
    :return: list of location strings, starting location first
    """
    # The parameter name shadows the builtin `dict` (kept for
    # backward-compatible keyword calls); alias it locally.
    mapping = dict
    current = ','.join(map(str, point))
    path = [current]
    # Follow the successor chain, taking at most len(mapping) steps.
    # Stop when the chain ends (KeyError) or revisits a location already
    # on the path (cycle guard) — the old code's `continue` merely
    # burned the remaining iterations with the same effect.
    for _ in range(len(mapping)):
        try:
            successor = mapping[current]
        except KeyError:
            break
        if successor in path:
            break
        path.append(successor)
        current = successor
    return path
def parser_ancillary_data_Descriptor(data,i,length,end):
    """\
    parser_ancillary_data_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    This descriptor is not parsed at the moment. The dict returned is:
    { "type": "ancillary_data", "contents" : unparsed_descriptor_contents }

    (Defined in ETSI EN 300 468 specification)
    """
    # Skip the 2-byte descriptor tag/length header and hand back the
    # raw payload untouched.
    raw_payload = data[i+2:end]
    return {"type": "ancillary_data", "contents": raw_payload}
def users_to_names(users):
    """Convert a list of Users to a list of user names (str).

    None entries are mapped to the empty string.
    """
    names = []
    for user in users:
        names.append('' if user is None else user.display_name)
    return names
def scale_log2lin(value):
    """
    Scale value from log10 to linear scale: 10**(value/10)

    Parameters
    ----------
    value : float or array-like
        Value or array to be scaled

    Returns
    -------
    float or array-like
        Scaled value
    """
    exponent = value / 10
    return 10 ** exponent