content
stringlengths
42
6.51k
def getTranscription(subjects, id):
    """Get corresponding transcription for a business type coordinates.

    Scans ``subjects`` for entries whose parent subject oid matches ``id``
    and builds a dict from the first value record; the last match wins.
    Returns None when no subject matches.
    """
    transcription = None
    for subj in subjects:
        if 'parent_subject_id' not in subj:
            continue
        if subj['parent_subject_id']['$oid'] != id:
            continue
        values = subj['data']['values'][0]
        transcription = {}
        if 'yp3_business_type_main' in values:
            transcription['business_type_text'] = values['yp3_business_type_main']
        if 'yp3_business_type_reference' in values:
            transcription['business_type_ref_text'] = values['yp3_business_type_reference']
        transcription['text_external_id'] = subj['_id']['$oid']
    return transcription
def compressChunk(tiles):
    """Compress an array of tiles with run-length encoding.

    Runs of length 1 are emitted as "<value>"; longer runs as
    "<count>:<value>".

    Bug fix: the original end-of-array handling dropped the pending run
    when the last tile differed from it, and returned an empty list for
    a single-tile input.
    """
    compressedChunk = []
    runValue = tiles[0]
    runCount = 1

    def flush():
        # Emit the pending run in RLE form.
        if runCount == 1:
            compressedChunk.append(str(runValue))
        else:
            compressedChunk.append(str(runCount) + ":" + str(runValue))

    for tile in tiles[1:]:
        if tile == runValue:
            runCount += 1
        else:
            flush()
            runValue = tile
            runCount = 1
    flush()
    return compressedChunk
def is_layout_row_empty(layout, row):
    """Check if the given row in a grid layout is empty.

    Returns False for a null layout or an out-of-range row.

    Bug fix: the original returned False whenever ``row`` was IN range
    (``row < layout.rowCount()``), which made the per-cell scan below it
    unreachable; the bound check is now ``row >= rowCount()``.
    """
    if not layout:
        return False
    if row >= layout.rowCount():
        return False
    for c in range(layout.columnCount()):
        if layout.itemAtPosition(row, c):
            return False
    return True
def index_rotation_period_validator(x):
    """
    Property: ElasticsearchDestinationConfiguration.IndexRotationPeriod

    Returns ``x`` unchanged when valid, otherwise raises ValueError.
    """
    valid_types = ["NoRotation", "OneHour", "OneDay", "OneWeek", "OneMonth"]
    if x in valid_types:
        return x
    raise ValueError(
        "IndexRotationPeriod must be one of: %s" % ", ".join(valid_types)
    )
# Ordered (old, new) replacements; the specific unordered-list form must
# run before the generic '<list' rule.
_TAG_REPLACEMENTS = (
    ('<list listtype="unordered" ', '<ul '),
    ('<list', '<ul '),
    ('</list>', '</ul>'),
    ('<item', '<li'),
    ('</item>', '</li>'),
    ('<paragraph', '<p'),
    ('</paragraph>', '</p>'),
    ('<linkhtml', '<a'),
    ('</linkhtml>', '</a>'),
)


def reformat_html_tags_in_raw_text(text: str) -> str:
    """
    Replaces difficult to render common tags in the raw text with
    better html tags.

    Args:
        text: Section raw text from db
    Returns:
        str: Section text with converted html tags
    """
    for old, new in _TAG_REPLACEMENTS:
        text = text.replace(old, new)
    return text
def versionless(package):
    """Remove the version suffix from a package reference.

    Searches for '@' after the first character so scoped packages work
    ('@scope/name@1.0' -> '@scope/name').

    Bug fix: when no version separator is present the original returned
    '' (find() == -1 sliced to package[:0]); now the reference is
    returned unchanged.
    """
    at = package[1:].find('@')
    if at == -1:
        return package
    return package[:1 + at]
# Sub-keys that must exist (defaulting to "") inside each nested record,
# keyed by the column that holds a dict of such records.
# NOTE(review): "Persecution" looks like a typo for "Prosecution", but it
# is a live data key — left unchanged.
_NESTED_COL_KEYS = {
    "Current Directors": ["Name", "Designation", "Appointment Date"],
    "Charges": ["Creation Date", "Modification Date", "Closure Date",
                "Assets Under Charge", "Amount", "Charge Holder"],
    "Establishments": ["Establishment Name", "City", "Pincode", "Address"],
    "Persecution": ["Defaulting Entities", "Court Name", "Prosecution Section",
                    "Date Of Order", "Status"],
}

# Columns whose missing value defaults to an empty list.
_LIST_COLS = ("Previous CIN", "Previous Names")


def fill_empty_cols(data, columns):
    """
    Creates key, value pair with null values for the columns not present.

    Missing columns default to [] (list columns), {} (nested-record
    columns) or "". For nested-record columns that are present, each
    record is padded with "" for its missing sub-keys.

    Refactor: the original repeated the same padding loop four times,
    once per nested column; the logic is now table-driven.

    :param data: dict of entries to normalize (mutated in place)
    :param columns: set of columns in data
    :return: the updated data
    """
    for key, val in data.items():
        for col in columns:
            if col not in val:
                if col in _LIST_COLS:
                    val[col] = []
                elif col in _NESTED_COL_KEYS:
                    val[col] = {}
                else:
                    val[col] = ""
            elif col in _NESTED_COL_KEYS:
                for record in val[col].values():
                    for d_key in _NESTED_COL_KEYS[col]:
                        if d_key not in record:
                            record[d_key] = ""
        data[key] = val
    return data
def is_string(s):
    """Return True when ``s`` is a ``str`` instance, False otherwise."""
    return isinstance(s, str)
def str_to_bin(string):
    """Convert each character of ``string`` to its zero-padded 8-bit
    binary representation and concatenate the results.

    Arguments:
        string (str) -- the string to be converted
    Returns:
        str -- concatenated binary digits
    """
    return ''.join(format(ord(ch), '08b') for ch in string)
def str_equals(a, b):
    """
    Constant time string equals method - no time leakage over content.

    Bug fix: the original only compared the first min(len(a), len(b))
    characters, so "abc" == "abcd" returned True. The length check is
    now folded into the result. (Length itself still leaks via timing,
    as in the original.)

    :param a: first string
    :param b: second string
    :return: True iff a and b are equal
    """
    result = len(a) == len(b)
    for i in range(min(len(a), len(b))):
        result &= a[i] == b[i]
    return result
def filter_based_on_rbn(lemma_objs, rbn_objs, verbose=0):
    """
    Keep only lemma objects known to RBN: when a lemma has a pos, the
    (lemma, pos) pair must match; otherwise the bare lemma must match.

    :param list lemma_objs: list of dfn_classes.Lemma objects
    :param list rbn_objs: list of resources.RBN_Reader.rbn_classes.LE objects
    """
    known_lemmas = set()
    known_lemma_pos = set()
    for rbn_obj in rbn_objs:
        known_lemmas.add(rbn_obj.lemma)
        known_lemma_pos.add((rbn_obj.lemma, rbn_obj.fn_pos))

    kept = []
    for lemma_obj in lemma_objs:
        if lemma_obj.pos:
            if (lemma_obj.lemma, lemma_obj.pos) in known_lemma_pos:
                kept.append(lemma_obj)
            elif verbose >= 2:
                print(f'ignoring because not in RBN {lemma_obj.lemma, lemma_obj.pos}')
        elif lemma_obj.lemma in known_lemmas:
            kept.append(lemma_obj)
        elif verbose >= 2:
            print(f'ignoring because not in RBN {lemma_obj.lemma}')
    return kept
def solve(s):
    """Convert ``s`` to all-lowercase or all-uppercase, whichever needs
    fewer changes; ties go to lowercase. Non-letters count toward the
    lowercase tally (they equal their own ``lower()``)."""
    uppers = sum(1 for ch in s if ch != ch.lower())
    lowers = len(s) - uppers
    return s.upper() if lowers < uppers else s.lower()
def get_package_version_key(pkg_name, pkg_version):
    """Return unique key combining package name and version."""
    return '@'.join((pkg_name, pkg_version))
def factorial(num):
    """Recursively compute num! (factorial of ``num``).

    Bug fix: the original base case was ``num == 1``, so factorial(0)
    (and any num < 1) recursed forever; the base case is now num <= 1,
    giving the conventional 0! == 1.
    """
    if num <= 1:
        return 1
    return num * factorial(num - 1)
def merge_two_dicts(x, y):
    """Return a new dict with x's entries overridden by y's.

    https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
    """
    return {**x, **y}
def iceil(data, base=None):
    """Round ``data`` up to the nearest multiple of ``base`` (integer ceil).

    :param data: value to round
    :param base: multiple to round up to; None or 0 returns data unchanged
    :return: rounded value
    """
    if not base:
        return data
    return data + (-data) % base
def qn(namespace):
    """Expand a 'prefix:tag' name into Clark notation '{uri}tag'."""
    nsmap = {
        'text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
    }
    prefix, tag = namespace.split(':')
    return '{{{}}}{}'.format(nsmap[prefix], tag)
def get_max(list_of_string):
    """Return the maximum value of a list of numeric strings (as int)."""
    return max(int(item) for item in list_of_string)
def result(obj):
    """Normalize ``obj`` into a list result: tuples become lists, lists
    pass through, anything else is wrapped in a one-element list."""
    if isinstance(obj, tuple):
        return list(obj)
    return obj if isinstance(obj, list) else [obj]
def changes_between_snapshots(before_dict, after_dict):
    """Diff two artifact snapshots (name -> hash dicts).

    Returns a tuple of name sets:
    (unchanged, modified, added, removed).
    """
    before_names = set(before_dict)
    after_names = set(after_dict)
    common = before_names & after_names
    unchanged = {name for name in common if before_dict[name] == after_dict[name]}
    modified = common - unchanged
    return (unchanged, modified,
            after_names - before_names, before_names - after_names)
def calculate_overall_score(
        google_gaps_percentage: float, transcript_gaps_percentage: float,
        google_confidence: float, alignment_score: float,
        weight_google_gaps: float, weight_transcript_gaps: float,
        weight_google_confidence: float, weight_alignment_score: float
) -> float:
    """
    Weighted sum predicting whether an alignment is "good".

    :param google_gaps_percentage: Percentage of gaps added to google's STT output
    :param transcript_gaps_percentage: Percentage of gaps added to the transcript
    :param google_confidence: Confidence of google's STT
    :param alignment_score: Final score of the alignment algorithm
    :param weight_google_gaps: Weight for weighted sum
    :param weight_transcript_gaps: Weight for weighted sum
    :param weight_google_confidence: Weight for weighted sum
    :param weight_alignment_score: Weight for weighted sum
    :return: Score between 0 and 1
    """
    weighted_terms = (
        (google_gaps_percentage, weight_google_gaps),
        (transcript_gaps_percentage, weight_transcript_gaps),
        (google_confidence, weight_google_confidence),
        (alignment_score, weight_alignment_score),
    )
    return sum(value * weight for value, weight in weighted_terms)
def GetBoxCenter(box):
    """Return the integer (x, y) center of a ((x1, y1), (x2, y2)) box,
    where the first corner is top-left and the second bottom-right."""
    (x1, y1), (x2, y2) = box
    return x1 + (x2 - x1) // 2, y1 + (y2 - y1) // 2
def merge_word_count(dic1, dic2):
    """Merge dic2's two-element count lists into dic1 in place and return dic1."""
    for word, counts in dic2.items():
        if word in dic1:
            dic1[word][0] += counts[0]
            dic1[word][1] += counts[1]
        else:
            dic1[word] = counts
    return dic1
def dict2list(obj):
    """Wrap a dict in a one-element list; pass anything else through unchanged."""
    return [obj] if isinstance(obj, dict) else obj
def check_card_played_active(laid_card):
    """
    Check if a card is a special kind of card with additional rules.

    :param laid_card: (color, value) tuple of the last played card
    :return: True if the card is special, False otherwise
    """
    special_kings = {('hearts', 'K'), ('pikes', 'K')}
    if laid_card in special_kings:
        return True
    return laid_card[1] in {'2', '3', '4', 'J', 'A'}
def min_length(word, thresh):
    """Length predicate: True when ``word`` has at least ``thresh`` characters."""
    return len(word) >= thresh
def is_url(url):
    """Check if the given string looks like an http(s) URL (case-insensitive)."""
    return url.lower().startswith(('http://', 'https://'))
def _one_arg_function(list_of_inputs, args, func, kwargs): """ Globally-defined helper function for pickling in multiprocessing. :param list of inputs: List of inputs to a function :param args: Names/args for those inputs :param func: A function :param kwargs: Other kwargs to pass to the function. """ new_kwargs = {} for i, inp in enumerate(list_of_inputs): new_kwargs[args[i]] = inp return func(**new_kwargs, **kwargs)
def calc_yngve_score(tree, parent):
    """Calculate the Yngve score of a parse tree.

    A leaf (string) scores ``parent``; an internal node sums its
    children's scores, adding each child's right-to-left position
    to ``parent``.
    """
    if isinstance(tree, str):
        return parent
    total = 0
    for offset, child in enumerate(reversed(tree)):
        total += calc_yngve_score(child, parent + offset)
    return total
def add_hour(base_hour, dow):
    """Advance one hour on a zero-based 24h clock, rolling the 1-based
    day-of-week forward past midnight.

    Returns (next_hour, next_dow).
    """
    next_hour = base_hour + 1
    next_dow = dow
    if next_hour == 24:
        next_hour = 0
        next_dow += 1
        if next_dow == 8:
            next_dow = 1
    return next_hour, next_dow
def has_rule_for_os(os_dict, os_name, os_version):
    """Return True if os_dict has a rule for os name/version.

    Fix: the original fell off the end (returning None) when ``os_name``
    was absent; the function now always returns a bool, per its docstring.
    """
    if 'any_os' in os_dict:
        return True
    if os_name not in os_dict:
        return False
    version_dict = os_dict[os_name]
    return os_version in version_dict or 'any_version' in version_dict
def list_of_dicts_to_dict(l, major_key, other_keys):
    """
    Re-key a list of dicts by ``major_key``.

    Args:
        l (list) - list of dictionaries
        major_key - key to orient the output dictionary on
        other_keys (list) - keys to include in each output sub-dict
    Returns:
        dict mapping d[major_key] -> {other_key: d[other_key], ...}
    """
    reoriented = {}
    for d in l:
        reoriented[d[major_key]] = {k: d[k] for k in other_keys}
    return reoriented
def gen_target_cfg_items(target_cfg):
    """Convert target_cfg to a list of target configs.

    A list passes through, a single dict is wrapped, anything else
    yields None.
    """
    if isinstance(target_cfg, list):
        return target_cfg
    if isinstance(target_cfg, dict):
        return [target_cfg]
    return None
def check_3d(coo, Lx, Ly, Lz, cyclic):
    """Check ``coo`` is in bounds for a maybe cyclic 3D lattice.

    Returns the (possibly wrapped) coordinate tuple, or None when the
    lattice has open boundaries and ``coo`` lies outside it.
    """
    x, y, z = coo
    if not cyclic:
        if not (0 <= x < Lx and 0 <= y < Ly and 0 <= z < Lz):
            return None
    return (x % Lx, y % Ly, z % Lz)
def dequote(s):
    """
    If a string has a matching pair of single or double quotes around
    it, remove them; otherwise return the string unchanged.

    From ToolMakerSteve on StackOverflow:
    http://stackoverflow.com/a/20577580

    Bug fix: the original indexed s[0]/s[-1] unconditionally and raised
    IndexError on an empty string; a length guard now also keeps a lone
    quote character ("'") intact instead of collapsing it to ''.
    """
    if len(s) >= 2 and s[0] == s[-1] and s.startswith(("'", '"')):
        return s[1:-1]
    return s
def ingredient_prop_score(ingredients: list, masses: list, prop) -> int:
    """Score a property as the mass-weighted sum over the ingredients.

    Args:
        ingredients ([list]): ingredients list
        masses ([list]): masses, indexed to the ingredients
        prop ([type]): the property to score
    Returns:
        [int]: the score, clamped at 0 when negative
    """
    total = sum(ing[prop] * mass for ing, mass in zip(ingredients, masses))
    return max(total, 0)
def split_s3_uri(s3_uri):
    """
    Split an AWS S3 URI into (bucket, key).

    :type s3_uri: str
    :rtype: typing.Tuple[str, str]
    """
    segments = s3_uri.split("/")
    # segments: ['s3:', '', '<bucket>', <key parts...>]
    return segments[2], "/".join(segments[3:])
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:
        proc_data: (Dictionary) raw structured data to process
    Returns:
        Dictionary. Structured data with the following schema:
        {
          "time": string, "uptime": string, "users": integer,
          "load_1m": float, "load_5m": float, "load_15m": float
        }
        Unconvertible values become None.
    """
    conversions = {
        'users': int,
        'load_1m': float,
        'load_5m': float,
        'load_15m': float,
    }
    for key, convert in conversions.items():
        if key in proc_data:
            try:
                proc_data[key] = convert(proc_data[key])
            except ValueError:
                proc_data[key] = None
    return proc_data
def snull(content: bytes):
    """Strip trailing null bytes from ``content``."""
    return content.rstrip(b"\x00")
def find_indexes(s, index):
    """Return every position in ``s`` where substring ``index`` occurs
    (overlapping matches included)."""
    return [pos for pos in range(len(s)) if s.startswith(index, pos)]
def find_city_and_state_from_location(loc: str) -> str:
    """
    Extract the first line of a scraped location string, stripped.

    :param loc: location string from web
    :return: location string
    """
    first_line, _, _ = loc.partition("\n")
    return first_line.strip()
def get_items(obj):
    """Return obj's (key, value) pairs as a list."""
    return [pair for pair in obj.items()]
def make_album(singer, name, number=''):
    """Build an album dict of singer and name; 'number' is included only
    when truthy."""
    album = dict(singer=singer, name=name)
    if number:
        album['number'] = number
    return album
def find_enclosed_zeroes(lst): """ The task of this function is to find the number of enclosed zeros between two non-zero elements of a list. We first identify the first non-zero element of the list and then find the last position value of the non-zero value. Once we get a list with zero value present between two non zero values, we idenitfy the index value of the elements which will help us determine the link between two lists in the grid. Arguments: lst- sublist of the input grid. Return: Index of the zero elements presents between two non zero values and the position of the first non-zero value. """ # Identify First non zero and last non zero element in the list try: first_nonzero = next( i for (i, e) in enumerate(lst) if e != 0 ) last_nonzero = next( len(lst) - i - 1 for (i, e) in enumerate(reversed(lst)) if e != 0 ) except StopIteration: return lst[:] # Include the element present in the last non-zero position last_nonzero_pos = last_nonzero + 1 first_nonzero_pos = first_nonzero # Find the index of the elements holding 0 values between two non-zero elements idx_list = [idx for idx, val in enumerate(lst[:first_nonzero_pos] + lst[first_nonzero_pos:last_nonzero_pos]) if val == 0] return idx_list, first_nonzero_pos
def get_corresponding_trail(path):
    """Given a path in the line graph of a directed graph, return the
    corresponding trail in the directed graph.

    :param path: path (sequence of arcs) in the line graph
    :return: node trail in the directed graph
    """
    if not path:
        return []
    trail = list(path[0])
    trail.extend(arc[1] for arc in path[1:])
    return trail
def solve_non_zero(xs):
    """
    solve MPS knowing that `xs` does not contain any 0
    (MPS = maximum product subarray, presumably — TODO confirm
    against the caller)

    KEY OBSERVATION: any sub-array *must* include xs[0] or xs[-1]:
    suppose you choose M, you can extend it by:
    - M - => entire array
    - M + => (M, +)
    + M - => (+, M)
    + M + => entire array
    therefore, use prefix and post
    """
    # Trivial cases: empty -> 0, single element -> that element.
    if len(xs) == 0:
        return 0
    if len(xs) == 1:
        return xs[0]
    # scanr[i] = product of the prefix xs[0..i];
    # scanl[i] = product of the suffix xs[-1-i..-1].
    scanr = [xs[0]]
    scanl = [xs[-1]]
    for i in range(1, len(xs)):
        scanr.append(scanr[-1] * xs[i])
        scanl.append(scanl[-1] * xs[-i - 1])
    # Per the key observation, the best zero-free product subarray is
    # some prefix or some suffix, so the answer is the max over both scans.
    return max(*scanl, *scanr)
def cleanFilename(sourcestring, removestring=" %:/,.\\[]<>*?"):
    """Clean a string by removing selected characters.

    Creates a legal and 'clean' source string by deleting characters not
    allowed in filenames. A default set is given but the caller can
    override it.

    Args:
        | sourcestring (string): the string to be cleaned.
        | removestring (string): remove all these characters (optional).
    Returns:
        | (string): A cleaned-up string.
    Raises:
        | No exception is raised.
    """
    # One C-level pass instead of a per-character membership test.
    return sourcestring.translate(str.maketrans('', '', removestring))
def circleBox(x, y, r):
    """Return (left, top, right, bottom) of the box enclosing a circle
    centered at (x, y) with radius r."""
    return (x - r, y - r, x + r, y + r)
def print_arr(arr):
    """Join the array's elements into a single newline-separated string."""
    return "\n".join(arr)
def _normalize(filename): """ .. function:: _normalize(filename) Prepends the filename with the path pointing to the main file. :type filename: string :rtype: string """ import os abs_path = os.path.abspath(__file__) dir_name = os.path.dirname(abs_path) return os.path.join(dir_name, filename)
def dict_sweep(d, vals=[".", "-", "", "NA", "none", " ", "Not Available", "unknown"]):
    """
    Recursively remove placeholder values from a dictionary in place.

    Keys whose value is in ``vals`` are deleted; placeholder items are
    removed from list values and nested dicts are swept recursively;
    list/dict values left empty are deleted too.

    Bug fixes: the original deleted keys while iterating ``d.items()``
    (RuntimeError in Python 3) and called ``list.remove`` while
    iterating the same list (which skips elements after each removal).
    Iteration is now over a key snapshot, and lists are rebuilt in
    place via slice assignment so aliases stay valid.

    (The mutable default ``vals`` is kept for interface compatibility;
    it is never mutated.)

    @param d: a dictionary
    @param vals: a string or list of strings to sweep
    """
    for key in list(d):
        val = d[key]
        if val in vals:
            del d[key]
        elif isinstance(val, list):
            val[:] = [item for item in val if item not in vals]
            for item in val:
                if isinstance(item, dict):
                    dict_sweep(item, vals)
            if len(val) == 0:
                del d[key]
        elif isinstance(val, dict):
            dict_sweep(val, vals)
            if len(val) == 0:
                del d[key]
    return d
def tot_energies(particles):
    """Sum the energy of all forceful particles.

    Returns (kinetic, potential/2, kinetic + potential/2); gpe is halved
    because summing per-particle potentials double counts each pair.
    """
    kinetic = 0.0
    potential = 0.0
    for particle in particles:
        if not particle.forceful:
            continue
        kinetic += particle.ke
        potential += particle.gpe  # double counts each interaction pair
    half_potential = potential / 2
    return kinetic, half_potential, kinetic + half_potential
def greedy(items, maxCost, keyFunction):
    """Greedy knapsack selection.

    Assumes items is a list and maxCost >= 0; keyFunction maps items to
    numbers and defines the (descending) consideration order. Items must
    expose getCost() and getValue() returning numbers.

    Returns (chosen items, total value).
    """
    chosen = []
    total_value = 0.0
    total_cost = 0.0
    for item in sorted(items, key=keyFunction, reverse=True):
        cost = item.getCost()
        if total_cost + cost <= maxCost:
            chosen.append(item)
            total_cost += cost
            total_value += item.getValue()
    return (chosen, total_value)
def t(line):
    """Select theta (second component) from a (rho, theta) line."""
    _rho, theta = line
    return theta
def _BsmTokenIsEndOfString(value, unused_context): """Construct context parser helper function to replace lambda.""" return value == b'\x00'
def _IsReadableStream(obj): """Checks whether obj is a file-like readable stream. :Returns: boolean """ if (hasattr(obj, 'read') and callable(getattr(obj, 'read'))): return True return False
def build_content_type(format, encoding='utf-8'):
    """
    Append a character encoding to ``format`` unless one is already
    present or the format is a known charset-free type.
    """
    if 'charset' in format or format in ('application/json', 'text/javascript'):
        return format
    return "%s; charset=%s" % (format, encoding)
def poly(x, pars):
    """
    A polynomial function with pars following the polyfit convention:
    pars[0] multiplies the highest power, pars[-1] is the constant term.

    Bug fix: the original squared the running power each iteration
    (``x = x*x``), producing powers 1, 2, 4, 8, ... so results were
    wrong for polynomials of degree >= 3; the power is now multiplied
    by x each step.
    """
    result = x * 0  # x*0 keeps the shape of x (scalar or array)
    if len(pars) == 0:
        return result
    xpow = x * 0 + 1  # x**0, same shape as x
    for p in reversed(pars):
        result = result + p * xpow
        xpow = xpow * x
    return result
def calculate_fuel_mass_flow(Firepower, LHV):
    """Compute the total fuel mass flow rate for a given firepower.

    Assumes 4 total fuel blocks, with 3/4 combustion chamber diameter as
    span, no flow from front and back faces, square cross section.

    Args:
        Firepower (double): user-defined firepower for the analysis
        LHV (double): user-defined lower heating value for the wood
    Returns:
        m_dot_fuel_total (double): mass flow rate for fuel (kg/s)
    """
    lhv_kj_per_kg = LHV * 1000  # Converted to kJ/kg
    m_dot_fuel_total = Firepower / lhv_kj_per_kg  # kg/s
    print('Total mass flow rate for given firepower (kg/s):' + "\n")
    print(m_dot_fuel_total)
    return m_dot_fuel_total
def printTermsAsLinks(rows):
    """
    Render terms as an HTML link list (pun intended).

    :param rows: table rows (dict iterator with 'concept_id' and
        'term_string' keys)
    :returns: HTML-formatted string
    """
    return ''.join(
        '<li><a href="/term=%s">%s</a></li>' % (row['concept_id'], row['term_string'])
        for row in rows
    )
def make_attrstring(attr):
    """Returns an attribute string in the form key="val", prefixed by a
    space when non-empty."""
    attrstring = ' '.join('%s="%s"' % pair for pair in attr.items())
    return (' ' + attrstring) if attrstring else ''
def emit(*args):
    """Return blocks of code as a string: the non-empty string forms of
    ``args`` joined by newlines."""
    rendered = (str(arg) for arg in args)
    return "\n".join(s for s in rendered if s)
def getminbyindex(inputlist, indexlist):
    """Return the minimum of each slice of ``inputlist`` delimited by
    consecutive boundary pairs in ``indexlist``.

    NOTE(review): original comment says this supports PIF calcs using
    flox data/crossvals, with Ti vs Te timestamp cross-referencing done
    by the caller — confirm against usage.
    """
    minima = []
    for start, stop in zip(indexlist, indexlist[1:]):
        minima.append(min(inputlist[start:stop]))
    return minima
def isclose(num_a, num_b, rel_tol=1e-09, abs_tol=0.0):
    """
    Return whether two floats are equal (close) to each other within a
    relative and/or absolute tolerance.
    """
    tolerance = max(rel_tol * max(abs(num_a), abs(num_b)), abs_tol)
    return abs(num_a - num_b) <= tolerance
def midi_to_freq(num):
    """
    Return the frequency in Hz for the given MIDI note number
    (equal temperament, A4 = note 69 = 440 Hz).
    """
    semitones_from_a4 = num - 69
    return 440 * 2 ** (semitones_from_a4 / 12.0)
def return_one(result):
    """Render a single indented 'return' statement for ``result``."""
    return " return " + result
def city_country(name, country):
    """Return a neatly formatted 'City, Country' string in title case."""
    return ", ".join((name.title(), country.title()))
def format_box(first_point, second_point):
    """Convert two half-scale corner points into an integer
    (x, y, width, height) box for use with OpenCV."""
    x0 = first_point[0] * 2
    y0 = first_point[1] * 2
    x1 = second_point[0] * 2
    y1 = second_point[1] * 2
    return int(x0), int(y0), int(x1 - x0), int(y1 - y0)
def coords_json_to_api_tilt(ang_down):
    """Convert API tilt (down-positive) to robot tilt (up-positive) by
    negating the angle."""
    return -ang_down
def fieldsDifferent(first, second, fields):
    """True when no field in ``fields`` that is present in BOTH dicts
    has equal values in the two."""
    return not any(
        field in first and field in second and first[field] == second[field]
        for field in fields
    )
def parse_b6o_line(line):
    """Parse a line in a BLAST tabular file (b6o).

    BLAST tabular columns: qseqid sseqid pident length mismatch gapopen
    qstart qend sstart send evalue bitscore
    (see the BLAST manual: https://www.ncbi.nlm.nih.gov/books/NBK279684/)

    Parameters
    ----------
    line : str
        Line to parse.
    Returns
    -------
    tuple of (str, str, float, int, int, int)
        Query, subject, score (bitscore), length, start, end —
        subject coordinates sorted ascending.
    """
    fields = line.rstrip().split('\t')
    qseqid, sseqid = fields[0], fields[1]
    length = int(fields[3])
    score = float(fields[11])
    sstart, send = sorted((int(fields[8]), int(fields[9])))
    return qseqid, sseqid, score, length, sstart, send
def is_palindrome(s):
    """Return True if s is a palindrome and False otherwise.

    Non-alphabetic characters are ignored and the comparison is
    case-insensitive.

    Bug fix: the original recursive version crashed (IndexError /
    unbounded strip loop) on empty or punctuation-only input; the
    filtered-letters comparison below handles those (returning True,
    the trivial palindrome) and is otherwise equivalent, since the
    recursion effectively stripped all interior punctuation too.
    """
    letters = [ch.lower() for ch in s if ch.isalpha()]
    return letters == letters[::-1]
def NOT(filter_):
    """Wrap a filter so that documents matching it are excluded."""
    return dict([("not", filter_)])
def convert_endianness(array: bytes):
    """
    Switch between big-endian order and little-endian order.

    Bitcoin stores bytes in little-endian byte order but we human beings
    are more comfortable with big-endian one. Therefore, we convert the
    endianness before showing values to users. Note that bytes objects
    are immutable.

    Fix: input validation was a bare ``assert``, which is stripped under
    ``python -O``; it now raises TypeError explicitly.
    """
    if not isinstance(array, bytes):
        raise TypeError("array must be a bytes object")
    return array[::-1]
def sigmoid(v, x):
    """EC50 (four-parameter Hill) sigmoid.

    v is a vector of parameters:
        v[0] = minimum allowed value
        v[1] = maximum allowed value
        v[2] = ec50
        v[3] = Hill coefficient
    """
    p_min, p_max, ec50, hill = v
    span = p_max - p_min
    return p_min + span / (1 + (x / ec50) ** hill)
def get_volume_id_to_instance_id_map(volume_info):
    """Map each attached volume id to the instance id of its first
    attachment; unattached volumes are ignored."""
    mapping = {}
    for volume_id, volume in volume_info.items():
        attachments = volume.get("Attachments")
        if attachments:  # attached volumes only
            mapping[volume_id] = attachments[0].get("InstanceId")
    return mapping
def show_etherscan(s) -> str:
    """
    Return the Etherscan address link for ``s``.

    NOTE(review): the URL is hard-coded to mainnet despite the original
    doc mentioning the "currently configured network" — confirm intent.
    """
    return 'https://etherscan.io/address/%s' % s
def is_search_length(splunk_record_key):
    """
    Return True if the given string is the search-length key.

    :param splunk_record_key: the string to check
    :type splunk_record_key: str
    :rtype: bool
    """
    return 'searchlength' == splunk_record_key
def expand_or_falsify_vrle(rle, vrle, fixed=False, variableLength=False):
    """
    Given a run-length encoded sequence (e.g. ``[('A', 3), ('B', 4)]``)
    and (usually) a variable run-length encoded sequence
    (e.g. ``[('A', 2, 3), ('B', 1, 2)]``) expand the VRLE to include the
    case of the RLE, if they can be consistent. If they cannot, return
    False.

    If vrle is None, it hasn't been found yet, so rle is simply expanded
    to a VRLE. If vrle is False, a counterexample was already found, so
    False is returned again. If variableLength is True, patterns of
    different lengths are merged as long as the overlap is consistent.

    Cleanup: removed the dead ``suf`` variable and deduplicated the
    identical equal-length / overlap merge loops.
    """
    def emit(c, lo, hi):
        # Build one output run, tagging it 'fixed' when requested.
        return (c, lo, hi, 'fixed') if fixed else (c, lo, hi)

    if vrle is False:
        # A counterexample was found earlier; stay falsified.
        return False
    if vrle is None:
        # No pattern yet: promote the RLE to a degenerate VRLE.
        return [emit(r[0], r[1], r[1]) for r in rle]

    lr, lv = len(rle), len(vrle)
    if lr != lv and not variableLength:
        return False

    lc = min(lr, lv)  # length of the overlapping block
    out = []
    for r, v in zip(rle[:lc], vrle[:lc]):
        c, m, M = v[:3]
        n = r[1]
        if r[0] != c:
            return False
        if m <= n <= M:
            out.append(v)
        else:
            # Widen the run bounds to absorb the observed count n.
            out.append(emit(c, min(m, n), max(M, n)))

    # Tail handling: only non-empty when variableLength and lr != lv.
    if lv == lc:
        # rle is longer: new runs become optional (0..count).
        out.extend(emit(r[0], 0, r[1]) for r in rle[lc:])
    else:
        # vrle is longer: relax its remaining runs to 0..max.
        out.extend(emit(v[0], 0, v[2]) for v in vrle[lc:])
    return out
def contrast_color(color, blackish='black', whiteish='whitesmoke'):
    """Select white(ish) or black(ish) text to contrast over an RGB color.

    # NOTE(review): the 0.6 threshold suggests components in 0..1 —
    # confirm against callers.
    """
    r, g, b = color[0], color[1], color[2]
    luminance = 0.299 * r + 0.587 * g + 0.114 * b
    return blackish if luminance > 0.6 else whiteish
def defocus(w020, radius, zi):
    """Return the longitudinal focus shift (defocus) deltaZ for a given
    defocus wavefront error.

    Parameters
    ----------
    w020 : float
        Defocus wavefront-error coefficient, measured at the pupil edge.
    radius : float
        Aperture radius in units of length (usually mm).
    zi : float
        Image/observation distance in the same units; for objects at
        infinity this is the focal length.

    Returns
    -------
    deltaZ : float
        Focus shift along the optical axis.

    Notes
    -----
    Derived from :math:`W_{020} = \\delta_z a^2 / (2 z_i(z_i+\\delta_z))`
    (paraxial, scalar diffraction), solved for :math:`\\delta_z`.

    See Also
    --------
    w020FromDefocus()
    """
    numerator = 2.0 * zi ** 2 * w020
    denominator = radius ** 2 - 2.0 * zi * w020
    return numerator / denominator
def find_splits(array1: list, array2: list) -> list:
    """Collect the sorted set of temporalRange endpoints from both event
    lists (the split points of the combined timeline)."""
    endpoints = {
        bound
        for event in array1 + array2
        for bound in (event["temporalRange"][0], event["temporalRange"][1])
    }
    return sorted(endpoints)
def call_if(tf, callable, *args, **kwargs):
    """Invoke ``callable(*args, **kwargs)`` and return its result iff
    ``tf`` is truthy; otherwise return None.

    (The parameter name ``callable`` shadows the builtin but is kept
    for interface compatibility.)
    """
    if not tf:
        return None
    return callable(*args, **kwargs)
def ground_collision_condition(cname, outs):
    """Ground-collision premature-termination condition: True when the
    component is the plant and its state index 11 is at or below zero."""
    if cname != "plant":
        return False
    return outs["states"][11] <= 0.0
def Slide_f(header, body, footer):
    """
    Concatenate the parts of a slide.

    :param header: the "header" of this slide
    :param body: the "body" of this slide
    :param footer: the "footer" of this slide
    """
    combined = header + body
    return combined + footer
def _get_line_label( pdata: dict, label: (str or bool) = '', **kwargs ) -> str: """Parse or auto generate a line label. Args: pdata: plot data structure label (optional): label name if string or autogenerated label if True, else no label Returns: string """ if isinstance(label, bool) and label: return pdata['system'] + pdata['variable_name'] elif isinstance(label, str) and len(label) > 0: return label else: return ''
def cipher(text, shift, encrypt=True):
    """Caesar-cipher ``text`` over a combined 52-letter alphabet
    (a-z followed by A-Z), so e.g. cipher('z', 1) == 'A'.

    Parameters
    ----------
    text : str
        Text to be transformed; characters outside the alphabet pass
        through unchanged.
    shift : int
        Number of alphabet positions to shift.
    encrypt : bool
        Shift forward when True (default), backward when False.

    Returns
    -------
    str : the transformed string

    Examples
    --------
    >>> cipher('abc', 1)
    'bcd'
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    out = []
    for ch in text:
        pos = alphabet.find(ch)
        if pos == -1:
            out.append(ch)
        else:
            offset = shift if encrypt else -shift
            out.append(alphabet[(pos + offset) % len(alphabet)])
    return ''.join(out)
def is_in_domain(i, j, m, n):
    """True when (i, j) lies inside the m-by-n domain."""
    return 0 <= i < m and 0 <= j < n
def get_file_lines(name):
    """
    Count the number of lines in a file.

    Parameters
    ----------
    str name: filepath and name of file
    """
    with open(name, "r") as stream:
        return sum(1 for _line in stream)
def convert_to_codec_key(value):
    """
    Normalize a codec key value (encoding codecs must be lower case and
    must not contain any dashes).

    :param value: value to convert; falsy values fall back to utf-8.
    """
    if not value:
        # fallback to utf-8
        value = 'UTF-8'
    # UTF-8 -> utf_8
    converted = value.replace('-', '_').lower()
    # fix some corner cases, see https://github.com/pyQode/pyQode/issues/11
    all_aliases = {
        'ascii': [
            'us_ascii', 'us', 'ansi_x3.4_1968', 'cp367', 'csascii',
            'ibm367', 'iso_ir_6', 'iso646_us', 'iso_646.irv:1991'
        ],
        'utf-7': [
            'csunicode11utf7', 'unicode_1_1_utf_7', 'unicode_2_0_utf_7',
            'x_unicode_1_1_utf_7', 'x_unicode_2_0_utf_7',
        ],
        'utf_8': [
            'unicode_1_1_utf_8', 'unicode_2_0_utf_8',
            'x_unicode_1_1_utf_8', 'x_unicode_2_0_utf_8',
        ],
        'utf_16': [
            'utf_16le', 'ucs_2', 'unicode', 'iso_10646_ucs2'
        ],
        'latin_1': ['iso_8859_1']
    }
    # Invert to a flat alias -> canonical-key lookup.
    alias_to_key = {
        alias: key
        for key, aliases in all_aliases.items()
        for alias in aliases
    }
    return alias_to_key.get(converted, converted)
def _filter_metadata(metadata, **kwargs): """Filter metadata before return it to api. Some metadata fields are not json compatible or only used in db/api internally. We should strip these fields out before return to api. """ if not isinstance(metadata, dict): return metadata filtered_metadata = {} for key, value in metadata.items(): if key == '_self': filtered_metadata[key] = { 'name': value['name'], 'description': value.get('description', None), 'default_value': value.get('default_value', None), 'is_required': value.get('is_required', False), 'required_in_whole_config': value.get( 'required_in_whole_config', False), 'js_validator': value.get('js_validator', None), 'options': value.get('options', None), 'required_in_options': value.get( 'required_in_options', False), 'field_type': value.get( 'field_type_data', 'str'), 'display_type': value.get('display_type', None), 'mapping_to': value.get('mapping_to', None) } else: filtered_metadata[key] = _filter_metadata(value, **kwargs) return filtered_metadata
def get_daemon_info(_): """Mock replacement of :meth:`aiida.engine.daemon.client.DaemonClient.get_daemon_info`.""" return { 'status': 'ok', 'time': 1576588772.459435, 'info': { 'cpu': 0.0, 'mem': 0.028, 'pid': 111015, 'create_time': 1576582938.75, }, 'id': 'a1c0d76c94304d62adfb36e30d335dd0' }
def validate(f, temp, key=""): """ Validate python dict using template :param f: file data :type f: dict :param temp: template data :type temp: dict :param key: yaml-file key :type key: str :return: list of asent keys :rtype: list """ absent_keys = [] if isinstance(f, dict): # check source data to supported type (skip encryption info) for el in temp: value = temp[el] if not (el in f): absent_keys.append(key + "/" + str(el)) continue if isinstance(value, list): for e in f[el]: absent_keys.extend(validate(e, temp[el][0], key + "/" + el)) elif isinstance(value, dict): absent_keys.extend(validate(f[el], temp[el], key + "/" + el)) return absent_keys
def interpretExit(e): """ interpret exit code returns (verdict, message) """ assert e != 0, "e should not be 0" if e == 139: return "R", "runtime error" if e == 152: return "T", "time limit exceeded" return "R", f"exit code {e}"
def lorentzian_one(x, fwhm, mu): """ Returns a Lorentzian line shape at x with FWHM fwhm and mean mu """ return 1.0 / (1+4*((x-mu)/fwhm)**2)
def _maxlength(X): """ Returns the maximum length of signal trajectories X """ N = 0 for x in X: if len(x) > N: N = len(x) return N
def get_subitems(items, parent_item=None): """Search a flat item list for child items. Items are dicts carrying at least 'id' and 'indent'. With a parent_item, scanning skips items until the parent's id is seen, then collects items exactly one indent level deeper, stopping at the next item back at the parent's own level. Without a parent_item, all items at indent level 1 are collected. NOTE(review): assumes `items` lists the parent before its children and that siblings appear contiguously -- confirm against the producer of this list. """ result_items = [] found = False # becomes True once the parent item has been passed if parent_item: # children sit one level deeper than their parent required_indent = parent_item['indent'] + 1 else: # top-level children live at indent 1 required_indent = 1 for item in items: if parent_item: # skip everything before the parent itself if not found and item['id'] != parent_item['id']: continue else: found = True # an item back at the parent's level (other than the parent # itself) ends the parent's subtree -- stop and return if item['indent'] == parent_item['indent'] and item['id'] != parent_item['id']: return result_items elif item['indent'] == required_indent and found: # direct child (deeper descendants have larger indents # and are skipped) result_items.append(item) elif item['indent'] == required_indent: # no parent given: collect every top-level item result_items.append(item) return result_items
def get_words(sentence): """Get the words from input string""" s = sentence return s
def create_organization(org_id, org_alias, org_name, org_type, description, org_url, prev, cur, timestamp, pt_id=[]): """ Constructs the payload to be stored in the state storage. Args: org_id (str): The uuid of the organization org_alias (str): The alias of the organization org_name (str): The name of the organization org_type (str): The type of the organization description (str): The description of the organization org_url (str): The url of the organization prev (str): The previous block id of the transaction (default "0") cur (str): the current block id of the transaction timestamp (str): The UTC time for when the transaction was submitted pt_id (list of str): The list of the part uuid associated with the organization (default []) Returns: type: dict The dictionary pertaining all the param is created and returned to be stored on the state storage. """ return { "uuid" : org_id, "alias" : org_alias, "name" : org_name, "type" : org_type, "description" : description, "url" : org_url, "prev_block" : prev, "cur_block" : cur, "timestamp" : timestamp, "pt_list" : pt_id }
def get_word_count(start, length, bits): """ Get the number of words that the requested bits would occupy. We have to take into account how many bits are in a word and the fact that the number of requested bits can span multipe words. """ newStart = start % bits newEnd = newStart + length totalWords = (newEnd-1) / bits return int(totalWords + 1)