content
stringlengths
42
6.51k
def strip_dev_suffix(dev):
    """Remove a corporate suffix (Inc, GmbH, Ltd, ...) from a developer name.

    :param dev: developer name string, or None/empty (returned unchanged)
    :return: the name with any recognized corporate suffix and trailing
        punctuation removed, otherwise the original value
    """
    # Longer suffixes come first so e.g. "incorporated" wins over "inc".
    corp_suffixes = (
        "incorporated", "corporation", "limited", "oy/ltd", "pty ltd",
        "pty. ltd", "pvt ltd", "pvt. ltd", "s.a r.l", "sa rl", "sarl",
        "srl", "corp", "gmbh", "l.l.c", "inc", "llc", "ltd", "pvt",
        "oy", "sa", "ab",
    )
    if not dev:
        return dev
    # Strip trailing dots/spaces ONCE and work on that string; the original
    # computed the slice from the unstripped length, which cut off extra
    # characters for names like "Foo Inc.." (two trailing dots).
    stripped = dev.rstrip(" .")
    lowered = stripped.lower()
    for suffix in corp_suffixes:
        if lowered.endswith(suffix):
            return stripped[: len(stripped) - len(suffix)].rstrip(",. ")
    return dev
def decode(obj):
    """Decode *obj* to str if it is bytes; return it unchanged otherwise."""
    return obj.decode() if isinstance(obj, bytes) else obj
def _get_value(key, entry): """ :param key: :param entry: :return: """ if key in entry: return entry[key] return None
def clean_string_value_from_dict_object(dict_object, dict_name, dict_key, post_errors,
                                        empty_string_allowed=False, none_allowed=False,
                                        no_key_allowed=False):
    """Return the string value stored under ``dict_key`` in ``dict_object``.

    Appends a message to ``post_errors`` and returns None when validation
    fails.

    :param dict_object: (type: dictionary) target object to get string from
    :param dict_name: (type: string) name of target dictionary (for messages)
    :param dict_key: (type: string) target dictionary key
    :param post_errors: (type: list) list of error messages (mutated in place)
    :param empty_string_allowed: (type: boolean) whether "" is valid, default False
    :param none_allowed: (type: boolean) whether None is valid, default False
    :param no_key_allowed: (type: boolean) whether a missing key is valid, default False
    :return: (type: string or None) the value for ``dict_key``, or None
    """
    if dict_key not in dict_object:
        if no_key_allowed:
            return None
        post_errors.append("{!r} key not found in {!r} object".format(dict_key, dict_name))
    elif dict_object[dict_key] == "" and empty_string_allowed is False:
        post_errors.append("Value for {!r} in {!r} object is an empty string".format(dict_key, dict_name))
    elif dict_object[dict_key] is None and none_allowed is False:
        post_errors.append("Value for {!r} in {!r} object is Null".format(dict_key, dict_name))
    # Original code repeated the None check here, which made this branch
    # unreachable and let non-string values (ints, lists, ...) through.
    # Skip the check for an allowed None; reject any other non-string.
    elif dict_object[dict_key] is not None and not isinstance(dict_object[dict_key], str):
        post_errors.append("Value for {!r} in {!r} object is not a string".format(dict_key, dict_name))
    else:
        return dict_object[dict_key]
def is_inst(module):
    """Return whether a module should be instantiated in the top level.

    A module qualifies either through its ``attr`` field (absent, or one of
    the top-level kinds) or by being a memory type.

    :raises ValueError: if ``attr`` carries an unrecognized value
    """
    if "attr" not in module:
        top_level_module = True
    else:
        attr = module["attr"]
        if attr in ("normal", "templated", "reggen_top"):
            top_level_module = True
        elif attr == "reggen_only":
            top_level_module = False
        else:
            raise ValueError('Attribute {} in {} is not valid'
                             .format(module['attr'], module['name']))

    top_level_mem = module['type'] in ('rom', 'ram_1p_scr', 'eflash')
    return top_level_mem or top_level_module
def get_name(path):
    """Get a file name from its full path.

    A trailing slash is ignored, so ``/a/b/`` yields ``b``.

    Args:
        path (str): full path

    Returns:
        str: just the final path component
    """
    parts = path.split("/")
    return parts[-2] if parts[-1] == "" else parts[-1]
def reinterpret_latin1_as_windows1252(wrongtext):
    """Re-decode text that was wrongly decoded as Latin-1.

    Maybe this was always meant to be in a single-byte encoding, and it
    makes the most sense in Windows-1252. Undecodable bytes are replaced.
    """
    raw_bytes = wrongtext.encode('latin-1')
    return raw_bytes.decode('WINDOWS_1252', 'replace')
def human_readable_time_from_seconds(seconds, depth=4):
    """Convert seconds to a human-readable str.

    The exact format may change, so this string should not be parsed.

    :param seconds: (int) a time in seconds
    :param depth: (int) maximum number of (largest) units to report

    Examples::

        human_readable_time_from_seconds(4)    -> '4 seconds'
        human_readable_time_from_seconds(400)  -> '6 minutes, 40 seconds'
        human_readable_time_from_seconds(4000) -> '1 hour, 6 minutes, 40 seconds'
        human_readable_time_from_seconds(4000, depth=2) -> '1 hour, 6 minutes'
    """
    seconds = int(seconds)
    if seconds == 0:
        return "0 seconds"
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    parts = []
    for value, unit in ((d, "day"), (h, "hour"), (m, "minute"), (s, "second")):
        if value > 0:
            parts.append("{} {}{}".format(value, unit, "" if value == 1 else "s"))
            if len(parts) >= depth:
                break
    # Joining at the end avoids the original's dangling ", " when the last
    # nonzero unit was followed only by zero units (e.g. 60 -> "1 minute, ").
    return ", ".join(parts)
def get_filename_in_second_line(cpp_txt):
    """Extract the filename from a ``// Begin <name>`` comment line.

    Expected input shape::

        // ```
        // Begin file_name.cpp
        // ...

    Returns the (lower-cased) last word of the first matching comment line,
    or "" when no such line exists.
    """
    for raw_line in cpp_txt.splitlines():
        lowered = raw_line.strip().lower()
        if not lowered.startswith('//'):
            continue
        words = lowered.split()
        # Need at least "// begin <name>"; second word must be 'begin'.
        if len(words) > 2 and words[1] == 'begin':
            return words[-1]
    return ""
def format_hotkey(s, default=None, hotkey='hotkey'):
    """
    Given a caption *s*, returns an urwid markup list with the first
    underscore prefixed character transformed into a (*hotkey*, char)
    tuple, and the rest marked with the *default* attribute (or not
    marked if *default* is ``None``). For example::

        >>> format_hotkey('_Foo')
        [('hotkey', 'F'), 'oo']
        >>> format_hotkey('_Foo', 'normal', 'hot')
        [('hot', 'F'), ('normal', 'oo')]
        >>> format_hotkey('Pa_use')
        ['Pa', ('hotkey', 'u'), 'se']
    """
    try:
        i = s.index('_')
    except ValueError:
        # No underscore: the whole caption is one chunk.
        return [s if default is None else (default, s)]
    if i == len(s) - 1:
        raise ValueError('Underscore cannot be last char in s')
    head, key_char, tail = s[:i], s[i + 1], s[i + 2:]
    markup = []
    # Filter on the TEXT being non-empty.  The original filtered on the
    # chunk itself, but a tuple like (default, '') is truthy, so with a
    # non-None default empty head/tail leaked into the output (contradicting
    # the second doctest above).
    if head:
        markup.append(head if default is None else (default, head))
    markup.append((hotkey, key_char))
    if tail:
        markup.append(tail if default is None else (default, tail))
    return markup
def basename(p):
    """Returns the final component of a pathname."""
    last_slash = p.rfind('/')
    return p[last_slash + 1:]
def iscamel(s: str) -> bool:
    """Return True if *s* is camel case: alphanumeric, starting with a
    lowercase letter. Empty strings are not camel case."""
    if not s.isalnum():
        return False
    first = s[0]
    return first.isalpha() and first.islower()
def elink_module(elink_intf, emesh_intf):
    """The Adapteva ELink off-chip communication channel.

    Interfaces:
        elink_intf: The external link signals
        emesh_intf: The internal EMesh packet interface

    Returns the list of MyHDL generator instances for the module.  All
    sub-blocks (clock/reset config, receiver, transmitter, CDC FIFO, IO
    SERDES) are currently stubbed out, so the list is empty.
    """
    mod_inst = []
    # Sub-module generators (ecfg_elink, erx, etx, ecfg_fifo, io_serdes)
    # are not wired up yet; append them here as they are implemented.
    return mod_inst
def add_role_constraint_to_app_def(app_def, roles=None):
    """Set ``acceptedResourceRoles`` on a Marathon app definition.

    Roles are a list. Acceptable roles include:
        '*'
        'slave_public'
        '*, slave_public'

    :param app_def: app definition dict (mutated in place and returned)
    :param roles: list of role strings; defaults to ['*'] when omitted.
        (The original used a mutable default list, which was shared across
        calls and stored directly into every app_def.)
    """
    app_def['acceptedResourceRoles'] = ['*'] if roles is None else roles
    return app_def
def _num_to_month(num): """Helper function to convert month number to short name Args: num (int): the month number to convert Returns: (str): The three letter short name of the corresponding month """ return { 1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun", 7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec", }[num]
def formatnumber(value, p=1):
    """Return a formatted number string with thousands separators, or None
    when *value* or *p* cannot be converted to a number."""
    try:
        number = float(value)
        precision = int(p)
    except (ValueError, TypeError):
        return None
    return "{:,.{}f}".format(number, precision)
def _unescape(cmd: bytes) -> bytes: """Replace escaped characters with the unescaped version.""" charmap = {b"&nbsp;": b" ", b"&apos;": b"'", b"&#40;": b"(", b"&#41;": b")"} for escape, unescape in charmap.items(): cmd = cmd.replace(escape, unescape) return cmd
def check_available(all_p, other):
    """(list, list) -> bool

    Check whether every coordinate in *other* is available for ship
    placement, i.e. present in *all_p*. Compares the intersection size
    against ``len(other)``, so duplicates in *other* fail the check.
    """
    common = set(all_p).intersection(other)
    return len(common) == len(other)
def pad_to(unpadded, target_len):
    """Pad a string with spaces to *target_len* characters; return the
    original string unchanged if it is already at least that long."""
    if len(unpadded) >= target_len:
        return unpadded
    return unpadded.ljust(target_len)
def create_numeric_mapping(node_properties):
    """Create node feature map.

    :param node_properties: List of features sorted.
    :return: Dict mapping each feature to its position index.
    """
    mapping = {}
    for position, feature in enumerate(node_properties):
        mapping[feature] = position
    return mapping
def split_args(args):
    """Split an arguments string on single spaces.

    Parameters
    ----------
    args : string
        Arguments string, typically space-separated.

    Returns
    -------
    list
        The list of split argument tokens (consecutive spaces produce
        empty tokens, as with ``str.split(" ")``).
    """
    separator = " "
    return args.split(separator)
def parse_obitools_fasta_entry(text, known_species=None):
    """Parse species from the OBITools extended FASTA header.

    See https://pythonhosted.org/OBITools/attributes.html which explains
    that OBITools splits the FASTA line into identifier, zero or more
    key=value; entries, and a free text description.

    We are specifically interested in the species_name, genus_name (used
    if species_name is missing), and taxid.

    >>> entry = "AP009202 species_name=Abalistes stellaris; taxid=392897; ..."
    >>> parse_obitools_fasta_entry(entry)
    (392897, 'Abalistes stellaris')

    Note this will *not* try to parse any key=value entries embedded in
    the first word (which is taken as the identifier).
    """
    taxid = 0
    species = ""
    _identifier, description = text.split(None, 1)
    for raw_field in description.split(";"):
        # We may be more lenient than OBITools here.
        field = raw_field.strip()
        if field.startswith("taxid="):
            taxid = int(field[len("taxid="):].strip())
        elif field.startswith("species_name="):
            species = field[len("species_name="):].strip()
        elif not species and field.startswith("genus_name="):
            species = field[len("genus_name="):].strip()
    return taxid, species
def matchTypes(accept_types, have_types):
    """Given the result of parsing an Accept: header, and the available
    MIME types, return the acceptable types with their quality markdowns.

    For example:

    >>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
    >>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
    [('text/html', 1.0), ('text/plain', 0.5)]

    Type signature: ([(str, str, float)], [str]) -> [(str, float)]
    """
    # An empty Accept header accepts everything at full quality.
    default = 1 if not accept_types else 0
    match_main = {}
    match_sub = {}
    for main, sub, q in accept_types:
        if main == '*':
            default = max(default, q)
        elif sub == '*':
            match_main[main] = max(match_main.get(main, 0), q)
        else:
            match_sub[(main, sub)] = max(match_sub.get((main, sub), 0), q)

    # Sort by descending quality, breaking ties by original order.
    ranked = []
    for position, mtype in enumerate(have_types):
        main, sub = mtype.split('/')
        if (main, sub) in match_sub:
            q = match_sub[(main, sub)]
        else:
            q = match_main.get(main, default)
        if q:
            ranked.append((1 - q, position, q, mtype))
    ranked.sort()
    return [(mtype, q) for _, _, q, mtype in ranked]
def patch(base, *overlays):
    """Recursive dictionary patching.

    Applies each overlay to *base* in order, merging nested dicts and
    overwriting everything else. Mutates and returns *base*.
    """
    for overlay in overlays:
        for key in overlay:
            new_value = overlay[key]
            old_value = base.get(key)
            if isinstance(new_value, dict) and isinstance(old_value, dict):
                patch(old_value, new_value)
            else:
                base[key] = new_value
    return base
def getValIfDictInString(s0, dictC):
    """Return the value of the first key in *dictC* that occurs as a
    substring of *s0*, or '' if none matches.

    Example: get connection code in a FaultDescription:
    "6 NEVADA 132. kV 3LG" => "3LG"
    """
    for key, value in dictC.items():
        if key in s0:
            return value
    return ''
def ERR_NOSUCHSERVER(sender, receipient, message):
    """Error Code 402.

    Note: *receipient* (sic) is accepted for signature compatibility but
    is not used in the formatted reply.
    """
    return "ERROR from <{}>: {}".format(sender, message)
def greatest_common_superclass(*types):
    """Finds the greatest common superclass of the given *types*.

    Returns None if the types are unrelated (i.e. share only ``object``).

    Args:
        *types (type):

    Returns:
        type or None
    """
    if len(types) == 1:
        return types[0]
    mros = [t.__mro__ for t in types]
    head, tail = mros[0], mros[1:]
    # Walk the first MRO in order; the first class shared by all MROs is
    # the most specific common ancestor.
    for candidate in head:
        if all(candidate in mro for mro in tail):
            return None if candidate is object else candidate
    return None
def scalar_eq(a, b, precision=0):
    """Check if two scalars are equal within a tolerance.

    Input:
        a : first scalar
        b : second scalar
        precision : maximum absolute difference still counted as equal
    Output:
        True if the scalars are equal within *precision*
    """
    difference = abs(a - b)
    return difference <= precision
def numericalTimeSlot(day) -> int:
    """Return the numerical index of the given timeslot.

    :param day: str, the timeslot name (e.g. "Monday_AM")
    :return: int, the corresponding 1-based slot number, or -1 if unknown
    """
    ordered_slots = ("Sunday_AM", "Monday_AM", "Monday_PM", "Tuesday_AM",
                     "Tuesday_PM", "Wednesday_AM", "Wednesday_PM",
                     "Thursday_AM", "Thursday_PM", "Friday_AM", "Friday_PM",
                     "Saturday_AM", "Saturday_PM")
    lookup = {name: number for number, name in enumerate(ordered_slots, start=1)}
    return lookup.get(day, -1)
def _format_classifiers(_classifiers: str): """Format classifiers gotten from the API.""" classifier_dict = {} output = "" for classifier in _classifiers.splitlines(): topic, content = map(str.strip, classifier.split("::", 1)) try: classifier_dict[topic].append(content) except KeyError: classifier_dict[topic] = [content] for topic, classifiers in classifier_dict.items(): output += f"[bold]{topic}[/]\n" for classifier in classifiers: output += f" {classifier}\n" return output
def _construct_resource_name(project_id, location, dataset_id, fhir_store_id, resource_id): """Constructs a resource name.""" return '/'.join([ 'projects', project_id, 'locations', location, 'datasets', dataset_id, 'fhirStores', fhir_store_id, 'fhir', resource_id ])
def _dist_calc(q_phoc, w_phoc): """ """ s = 0 qsq = 10**-8 psq = 10**-8 for q, p in zip(q_phoc, w_phoc): s += q*p qsq += q**2 psq += p**2 q_norm = qsq**(0.5) p_norm = psq**(0.5) dist = 1 - s / (q_norm * p_norm) return dist
def occ_indexes(l, sub_l):
    """Used for finding the concordances.

    :param l: list
    :param sub_l: sub-list
    :return: (start, end) index pairs for all occurrences of the sub-list
        in the list; an empty list if none found
    """
    width = len(sub_l)
    matches = []
    for start in range(len(l)):
        if l[start:start + width] == sub_l:
            matches.append((start, start + width))
    return matches
def Powerlaw(x, a, alpha):
    """Power-law function.

    Inputs:
    -------
    ``x``: independent variable
    ``a``: scaling factor
    ``alpha``: exponent

    Formula:
    --------
    ``a*x^alpha``
    """
    return a * pow(x, alpha)
def prod(factors):
    """Computes the product of values in a list.

    :param factors: list of values to multiply
    :return: product of all values; 1 (the empty product) for an empty
        list.  The original indexed ``factors[0]`` and crashed with
        IndexError on empty input.
    """
    product = 1
    for factor in factors:
        product *= factor
    return product
def group_chains(chain_list):
    """Group EPBC chains.

    Merges overlapping pairs from ``chain_list`` into connected groups:
    each input item is a 2-element pairing, and pairs sharing an element
    are folded into the same set.

    NOTE: ``chain_list`` is consumed (emptied) in place by the repeated
    ``pop`` calls.

    :param chain_list: list of 2-element pairings (each must be sortable
        and contain hashable items)
    :return: list of groups, each a list of the connected elements
    """
    chains = []
    while len(chain_list):
        # Seed a new group with the first remaining pair.
        chain = set(chain_list.pop(0))
        ii = 0
        while ii < len(chain_list):
            c1 = sorted(chain_list[ii])
            is0 = c1[0] in chain
            is1 = c1[1] in chain
            if is0 and is1:
                # Both ends already in the group: the pair is redundant.
                chain_list.pop(ii)
            elif is0 or is1:
                # One end connects: absorb the pair and restart the scan,
                # since earlier-skipped pairs may now connect too.
                chain.update(c1)
                chain_list.pop(ii)
                ii = 0
            else:
                ii += 1
        chains.append(list(chain))
    # Tally group sizes (value is a one-element counter list); computed but
    # intentionally not returned — presumably a leftover diagnostic.
    aux = {}
    for chain in chains:
        aux.setdefault(len(chain), [0])[0] += 1
    return chains
def cuda_tpb_bpg_1d(x, TPB=256):
    """Get the needed blocks per grid for a 1D CUDA grid.

    Parameters :
    ------------
    x : int
        Total number of threads

    TPB : int
        Threads per block

    Returns :
    ---------
    BPG : int
        Number of blocks per grid (always at least one spare block, as in
        the original ``int(x/TPB + 1)`` formula)

    TPB : int
        Threads per block
    """
    # Integer arithmetic instead of int(x/TPB + 1): same result for all
    # practical sizes, but immune to float rounding for very large x.
    BPG = x // TPB + 1
    return BPG, TPB
def index_along_axis(index, ndim, axis):
    """Create a slice tuple for indexing into a NumPy array along a
    (single) given axis.

    Parameters
    ----------
    index : array_like or slice
        The value you wish to index with along `axis`.
    ndim : int
        The number of dimensions in the array into which you are indexing.
    axis : int
        The axis along which you wish to index.

    Returns
    -------
    indices : tuple
        A slice tuple selecting all elements along every axis except
        `axis`, for which `index` is used instead.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(27).reshape((3, 3, 3))
    >>> index = index_along_axis([0, 2], 3, 2)
    >>> np.all(a[index] == a[:, :, [0, 2]])
    True
    """
    selector = [slice(None) for _ in range(ndim)]
    selector[axis] = index
    return tuple(selector)
def _scaled_average(numbers, scalar): """Internal utility function to calculate scaled averages.""" average = sum(numbers) / len(numbers) return scalar * average
def strip_package_version(package_name):
    """Takes a component name with a version ("name/version") and strips
    the version, returning just the name."""
    name, _sep, _version = package_name.partition("/")
    return name
def repackage_hidden(h):
    """Wraps hidden states in new tensors, to detach them from their
    history. Returns a list with ``.detach()`` applied to each state."""
    return list(map(lambda state: state.detach(), h))
def disambiguate(lines, words, bytes_, chars, filename=None):
    """Resolve wc-style option flags into the final set of reports.

    When an option is specified, wc only reports the information requested
    by that option. The default action (no flags) is equivalent to
    specifying -c, -l and -w. -m (chars) supersedes -c (bytes).

    -c  bytes
    -l  lines
    -m  chars
    -w  words
    """
    if not any((lines, words, bytes_, chars)):
        lines = words = bytes_ = True
    if chars:
        bytes_ = False
    return lines, words, bytes_, chars, filename
def translate_dna_sequence(sequence):
    """Translates **DNA** to **protein**.

    Assumes always that the codon starts in the first position of the
    sequence. Unknown codons translate to 'X'; stop codons to '_'; any
    trailing 1-2 bases are ignored.

    :param str sequence: DNA sequence
    :return: :class:`str` - protein sequence

    .. seealso:: :func:`.translate_3frames`
    """
    codontable = {
        'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
        'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
        'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
        'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
        'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
        'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
        'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
        'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
        'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
        'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
        'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
        'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
        'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
        'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
        'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',
        'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W'}
    # Collect residues and join once: the original `protein = protein + aa`
    # loop is quadratic in sequence length.
    residues = []
    for start in range(0, len(sequence) - 2, 3):
        codon = sequence[start:start + 3]
        residues.append(codontable.get(codon, 'X'))
    return "".join(residues)
def score_word(word, score_dict):
    """Given a word, score it by summing each letter's value from the
    scoring dictionary. Raises KeyError for letters not in the dict."""
    return sum(score_dict[letter] for letter in word)
def is_int(some_str):
    """Return True if the given string represents an integer value."""
    try:
        int(some_str)
    except ValueError:
        return False
    return True
def parse_description(description):
    """Parse a description into its lines.

    Args:
        description: str.

    Returns:
        list: the description split on newline characters.
    """
    newline = '\n'
    return description.split(newline)
def check_for_keys(key_list, dictionary):
    """Return True if all keys in *key_list* are present in *dictionary*.

    (``all(...)`` is already a bool; the original wrapped it in a
    redundant ``True if ... else False`` conditional.)
    """
    return all(key in dictionary for key in key_list)
def match_score(s1, s2):
    """Align two sequences position-by-position and count how many
    positions match (compared up to the shorter length)."""
    return sum(a == b for a, b in zip(s1, s2))
def FlagsForCpu(cpu, target_os, sysroot):
    """Returns the compiler flags to build for the given CPU.

    Appends -isysroot when a sysroot is given, plus the iOS deployment
    target flag (simulator variant for x86/x64) when targeting iOS.
    """
    FLAGS = {
        'x64': '-m64',
        'x86': '-m32',
        'arm': '-arch armv7',
        'arm64': '-arch arm64',
    }
    assert ' ' not in sysroot, "sysroot path can't contain spaces"
    assert cpu in FLAGS, 'Unsupported CPU architecture'
    flags = FLAGS[cpu]
    if sysroot:
        flags += ' -isysroot ' + sysroot
    if target_os == 'ios':
        if cpu in ('x86', 'x64'):
            flags += ' -mios-simulator-version-min=9.0'
        else:
            flags += ' -miphoneos-version-min=9.0'
    return flags
def transform_sample_name(value):
    """Transform legacy name to new sample name by dropping all '/'
    characters (a no-op when none are present)."""
    return value.replace('/', '')
def read_file(filename, mode='r', content=None):
    """Read and return the contents of the given filename.

    :param filename: path of the file to read
    :param mode: open mode; a legacy 'U' flag is stripped, since the 'U'
        (universal newlines) mode was removed in Python 3.11 and text mode
        already performs universal-newline translation. Default changed
        from 'rU' to the equivalent 'r' for the same reason.
    :param content: unused; kept for signature compatibility
    :return: file contents
    """
    mode = mode.replace('U', '') or 'r'
    # `with` guarantees the handle is closed, replacing the try/finally.
    with open(filename, mode) as f:
        content = f.read()
    return content
def parse_gff_attributes(attr_str):
    """
    Parses a GFF/GTF attribute string and returns a dictionary of
    name-value pairs. The general format for a GFF3 attributes string is

        name1=value1;name2=value2

    The general format for a GTF attribute string is

        name1 "value1" ; name2 "value2"

    The general format for a GFF attribute string is a single string that
    denotes the interval's group; in this case, the method returns a
    dictionary with a single key-value pair, and the key name is 'group'.
    """
    attributes = {}
    for name_value_pair in attr_str.split(";"):
        # Try splitting by '=' (GFF3) first because spaces are allowed in
        # GFF3 attributes; next, try double quotes for GTF.
        pair = name_value_pair.strip().split("=")
        if len(pair) == 1:
            pair = name_value_pair.strip().split("\"")
        if len(pair) == 1:
            # Could not split for some reason.
            continue
        # (The original also compared the list `pair` against '' here,
        # which could never be true — dead branch removed.)
        name = pair[0].strip()
        if name == '':
            continue
        # Need to strip double quote from values.
        value = pair[1].strip(" \"")
        attributes[name] = value
    if len(attributes) == 0:
        # Could not split attributes string, so the entire string must be
        # the 'group' attribute. This is the case for strictly GFF files.
        attributes['group'] = attr_str
    return attributes
def maxSatisfied(customers, grumpy, X):
    """Maximum satisfied customers with an X-minute "not grumpy" window.

    Sliding-window solution: customers in non-grumpy minutes are always
    satisfied; a single window of X consecutive minutes additionally
    recovers the customers of grumpy minutes inside it.

    :type customers: List[int]
    :type grumpy: List[int]  # 1 = grumpy during that minute, 0 = not
    :type X: int             # window length in minutes
    :rtype: int
    """
    sum_cust = 0
    count = 0
    start = 0
    # Baseline: everyone served during non-grumpy minutes is satisfied.
    for i in range(len(customers)):
        if grumpy[i] == 0:
            sum_cust += customers[i]
    win = sum_cust
    if X == 0:
        return sum_cust
    # Seed the window with the first X minutes: add back customers of
    # grumpy minutes inside it; `count` tracks grumpy minutes in-window.
    for i in range(X):
        if grumpy[i] == 1:
            count += 1
            win += customers[i]
    sum_cust = max(sum_cust, win)
    # Slide the window one minute at a time across the rest of the day.
    for i in range(X, len(customers)):
        if grumpy[i] == 1:
            win += customers[i]
            count += 1
        if grumpy[start] == 1:
            win -= customers[start]
            count -= 1
        start += 1
        # NOTE(review): `count` can never exceed X here (the window holds
        # at most X minutes), so this guard looks unreachable — confirm
        # before removing.
        if count > X:
            continue
        sum_cust = max(win, sum_cust)
    return sum_cust
def glue_tokens(tokens):
    """The standard way in which we glue together tokens to create keys
    in our hashmaps: space-joined."""
    separator = ' '
    return separator.join(tokens)
def x_gate_counts_deterministic(shots, hex_counts=True):
    """X-gate circuits reference counts.

    Returns expected counts dicts for three circuits: X (all ones),
    XX = I (all zeros), HXH = Z (all zeros), keyed in hex or binary.
    """
    one_key, zero_key = ('0x1', '0x0') if hex_counts else ('1', '0')
    # X ; XX = I ; HXH = Z
    return [{one_key: shots}, {zero_key: shots}, {zero_key: shots}]
def evaluate_poly(poly, x):
    """
    Objective:
        Computes the polynomial function for a given value x and returns it.
    Input Params:
        poly: tuple of numbers - coefficient values, index = exponent
        x: value for x in f(x)
    Return:
        value of f(x)

    >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10)
    79800.0
    """
    total = 0
    for exponent, coefficient in enumerate(poly):
        total += coefficient * (x ** exponent)
    return total
def list_intersection(lst1, lst2):
    """List intersection: elements of lst1 that also appear in lst2
    (preserving lst1's order and duplicates)."""
    return list(filter(lambda value: value in lst2, lst1))
def get_att(logical_name, attribute):
    """The intrinsic function Fn::GetAtt returns the value of an attribute
    from a resource in the template.

    Args:
        logical_name: The logical name of the resource that contains the
            attribute you want.
        attribute: The name of the resource-specific attribute whose value
            you want. See the resource's reference page for details about
            the attributes available for that resource type.

    Returns:
        The Fn::GetAtt intrinsic-function dict.
    """
    fn_args = [logical_name, attribute]
    return {'Fn::GetAtt': fn_args}
def merge_stations(all_stations, accessible_stations):
    """Merge two lists of stations.

    For each station in ``all_stations``, appends every station from
    ``accessible_stations`` whose ``osm_ids`` set overlaps it; a station
    with no accessible counterpart is kept itself (if it has a name).

    NOTE(review): there is no ``break`` after a match, so several
    accessible stations can be appended for one input station — confirm
    whether multiple matches are intended or a single merge was meant.

    :param all_stations: iterable of objects with ``osm_ids`` (set) and
        ``name`` attributes
    :param accessible_stations: iterable of objects with ``osm_ids``
    :return: merged list of station objects
    """
    merged_stations = []
    merged_count = 0
    for station1 in all_stations:
        found = False
        for station2 in accessible_stations:
            # Non-empty osm_ids intersection means both describe the
            # same physical station.
            if len(station1.osm_ids.intersection(station2.osm_ids)):
                merged_stations.append(station2)
                found = True
                merged_count += 1
        if not found and station1.name:
            merged_stations.append(station1)
    # Diagnostic output of how many stations were matched.
    print(merged_count)
    return merged_stations
def bitInBitmap(bitmap, bit):
    """Bit map decoding: return True if *bit* is set in *bitmap*.

    :param bitmap: integer bit field
    :param bit: a single-bit mask (power of two), e.g. 2**k
    :return: bool

    The original decomposed the bitmap by subtracting powers of two from
    2**10 downward, which silently mis-decoded bitmaps with bits above
    2**10 and never matched non-listed bits; a bitwise AND is exact for
    any width.
    """
    return (bitmap & bit) != 0
def schedule_lrn_rate(train_step):
    """Learning-rate schedule; train_step equals total number of
    mini-batch updates.

    Piecewise-constant: 1e-3 before step f, 1e-4 up to step 4f, then 1e-5.
    """
    f = 1  # schedule scaling factor
    if train_step < 1 * f:
        # 1e-1 blows up, sometimes 1e-2 blows up too.
        return 1e-3
    if train_step < 4 * f:
        return 1e-4
    return 1e-5
def concat_things(*args):
    """Behave kinda like a print statement: convert all the things to
    strings (stripped), and return them joined by single spaces."""
    return ' '.join(str(thing).strip() for thing in args)
def treat_input(data):
    """Treats the input json to keep it in the same format as the
    coefficients: keys starting with 'Q' are replaced by their value,
    everything else keeps its key; all entries map to 1."""
    treated_data = {}
    for key, value in data.items():
        target = value if key[0] == 'Q' else key
        treated_data[target] = 1
    return treated_data
def class_name(dataset, idx): """ Args - dataset: (str) Dataset name - idx: (int or string) Class index """ LINEMOD = ('ape', 'bvise', 'bowl', 'camera', 'can', 'cat', 'cup', 'driller', 'duck', 'eggbox', 'glue', 'holepuncher', 'iron', 'lamp', 'phone') YCB = ('002_master_chef_can', '003_cracker_box', '004_sugar_box', '005_tomato_soup_can' '006_mustard_bottle', '007_tuna_fish_can', '008_pudding_box', '009_gelatin_box', '010_potted_meat_can', '011_banana', '019_pitcher_base', '021_bleach_cleanser', '024_bowl', '025_mug', '035_power_drill', '036_wood_block', '037_scissors', '040_large_marker', '051_large_clamp', '052_extra_large_clamp', '061_foam_brick') if 'linemod' in dataset: return LINEMOD[int(idx) - 1] elif 'ycb' in dataset: return YCB[int(idx) - 1] else: raise NotImplementedError(dataset)
def lowercase(obj):
    """Recursively lowercase a structure: dict keys are lowered and values
    recursed; lists/sets/tuples keep their type with recursed elements;
    strings are lowered; anything else is returned unchanged."""
    if isinstance(obj, str):
        return obj.lower()
    if isinstance(obj, dict):
        return {key.lower(): lowercase(value) for key, value in obj.items()}
    if isinstance(obj, (list, set, tuple)):
        container = type(obj)
        return container(lowercase(item) for item in obj)
    return obj
def find_on_grid(grid, wanted):
    """Find the (row, col) location of the first occurrence of *wanted*
    on the grid, scanning row-major; None if absent."""
    for row_index, row in enumerate(grid):
        for col_index, cell in enumerate(row):
            if cell == wanted:
                return (row_index, col_index)
    return None
def loss_inversely_correlated(X, y):
    """Return ``-1 * (concept_direction * prediction)`` where
    prediction = X[0] and concept_direction = y."""
    prediction, *_rest = X
    concept_direction = y
    return -(concept_direction * prediction)
def calculate_mean(numbers):
    """Calculates the mean of a list of numbers.

    Parameters
    ----------
    numbers: iterable[numbers]
        Must be non-empty and support ``len``.

    Returns
    -------
    number
    """
    total = sum(numbers)
    return total / len(numbers)
def median_val(vals):
    """
    :param vals: an iterable such as list (must support len/sorted)
    :return: the median of the values; the mean of the two central values
        for even-length input
    """
    ordered = sorted(vals)
    count = len(ordered)
    middle = count // 2
    if count % 2:
        return ordered[middle]
    return (ordered[middle - 1] + ordered[middle]) / 2
def _unpack_metric_map(names_to_tuples): """Unpacks {metric_name: (metric_value, update_op)} into separate dicts.""" metric_names = names_to_tuples.keys() value_ops, update_ops = zip(*names_to_tuples.values()) return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def dir_basename_from_pid(pid, j):
    """Mapping article id from metadata to its location in the arXiv S3
    tarballs.

    Returns dir/basename without extension and without fully qualified
    path. It also ignores version because there is no version in the
    tarballs (they contain the updated version at all times). Add .txt,
    .pdf or .jpg for the actual file you need and prepend the path to
    your files dirs.

    NOTE(review): the `pid` parameter is unused — the id is taken from
    ``j['_rawid']``.

    :param pid: unused
    :param j: metadata dict with '_rawid' and (for the fallback branch)
        'arxiv_primary_category'
    :return: "YYMM/<basename>" string
    """
    schema = "unhandled"
    if j['_rawid'][:4].isdigit() and '.' in j['_rawid']:
        # This is the current scheme from 0704: YYMM/YYMM.xxxxx
        # (number of x digits is variable).
        schema = 'current'
        dir_basename_str = '/'.join([j['_rawid'][:4], j['_rawid']])
    elif '/' in j['_rawid']:
        # e.g. cond-mat/0210533 — some rawids had the category and the id.
        # Maps to YYMM/catYYMMxxxxx.
        schema = 'slash'
        dir_basename_str = '/'.join([j['_rawid'].split("/")[1][:4], "".join(j['_rawid'].split("/"))])
    else:
        # Rawid with no category: take the category from the metadata,
        # split on the dot (if it has one). Maps to YYMM/catYYMMxxxxx.
        schema = 'else'
        dir_basename_str = '/'.join([j['_rawid'][:4].split("-")[0], j['arxiv_primary_category']['term'].split(".")[0] + j['_rawid']])
    # NOTE(review): every branch above assigns a schema, so this warning
    # appears unreachable — confirm before removing.
    if schema == 'unhandled':
        print('unhandled mapping in pid to tarball', j['_rawid'])
    return dir_basename_str
def recvall(sock, n):
    """Receive exactly *n* bytes from *sock*.

    Helper that loops over ``recv`` until *n* bytes have arrived; returns
    None if EOF is hit before that.

    :param sock: socket
    :param n: length in bytes (number of bytes)
    :return: the received bytes, or None on premature EOF
    """
    chunks = []
    received = 0
    while received < n:
        chunk = sock.recv(n - received)
        if not chunk:
            return None
        chunks.append(chunk)
        received += len(chunk)
    return b''.join(chunks)
def dist_rgb_weighted(rgb1, rgb2):
    """Determine the weighted distance between two rgb colors.

    :arg tuple rgb1: RGB color definition
    :arg tuple rgb2: RGB color definition
    :returns: Square of the distance between provided colors
    :rtype: float

    Similar to a standard distance formula, the values are weighted to
    approximate human perception of color differences. For efficiency,
    the square of the distance is returned, which is sufficient for
    comparisons.
    """
    red_mean = (rgb1[0] + rgb2[0]) / 2.0
    dr = rgb1[0] - rgb2[0]
    dg = rgb1[1] - rgb2[1]
    db = rgb1[2] - rgb2[2]
    red_weight = 2 + red_mean / 256
    blue_weight = 2 + (255 - red_mean) / 256
    return red_weight * dr * dr + 4 * dg * dg + blue_weight * db * db
def find_divisors(x):
    """
    This is the "function to find divisors in order to find generators"
    module. This DocTest verifies that the module correctly calculates
    all divisors of a number x.

    >>> find_divisors(10)
    [1, 2, 5, 10]
    >>> find_divisors(112)
    [1, 2, 4, 7, 8, 14, 16, 28, 56, 112]
    """
    return [candidate for candidate in range(1, x + 1) if x % candidate == 0]
def _isiterable(obj): """ Copied from putil.misc module, not included to avoid recursive inclusion of putil.pcontracts module """ try: iter(obj) except TypeError: return False else: return True
def check_argument(actual_argument_value, correct_argument_value):
    """Checks whether the two given argument-values are equal.

    If so, returns True. If not, prints an appropriate message and
    returns False.
    """
    if actual_argument_value == correct_argument_value:
        return True
    print(' Your code FAILS this test because the argument')
    print(' has the wrong value after the function call:')
    print(' -- The correct value after the function call is:', correct_argument_value)
    print(' -- Your actual value after the function call is:', actual_argument_value)
    return False
def RETURN_delete_negatives(numbers):
    """
    Returns a NEW list that is the same as the given list of numbers,
    but with each negative number in the list DELETED from the list.

    For example, if the given list is [-30.2, 50, 12.5, -1, -5, 8, 0],
    then the returned list is the NEW list [50, 12.5, 8, 0].

    This function must NOT mutate the given list.

    Precondition:
    :type numbers: list where the list is a list of numbers.
    """
    # Comprehension replaces the range(len)/append loop and the stale
    # exercise-scaffold comments; a new list is built, so the input is
    # never mutated.
    return [number for number in numbers if number >= 0]
def format_hostmaster(hostmaster):
    r"""Encode a hostmaster mailbox as an SOA RNAME domain string.

    The DNS encodes the <local-part> as a single label and the
    <mail-domain> as a domain name; the label is prefixed to the domain
    name, so HOSTMASTER@SRI-NIC.ARPA maps to HOSTMASTER.SRI-NIC.ARPA.
    If the <local-part> contains dots, they must be backslash-quoted, so
    Action.domains@ISI.EDU becomes Action\.domains.ISI.EDU.

    http://www.ietf.org/rfc/rfc1035.txt

    :param hostmaster: "local-part@mail-domain" string
    :return: the encoded domain name with a trailing dot
    """
    name, domain = hostmaster.split('@')
    # Raw string for the replacement: the original '\.' relied on Python
    # keeping unknown escapes (a SyntaxWarning on 3.12+).
    name = name.replace('.', r'\.')
    return "%s.%s." % (name, domain)
def totalStudents(theDictionary):
    """Identifies the total number of students assigned a locker.

    :param dict[str, str] theDictionary: key: locker number / value: student name or "open"
    :return: The total number of students assigned to a locker
    :rtype: int

    NOTE(review): a value only counts when ``str.isalpha()`` is true, so
    names containing spaces or punctuation are NOT counted — confirm this
    filter is intended.
    """
    return sum(
        1
        for occupant in theDictionary.values()
        if occupant != "open" and occupant.isalpha()
    )
def add_two_numbers(first, second):
    """Return the sum of *first* and *second*.

    :first: first number
    :second: second number
    """
    return first + second
def rc2p(row, col, N):
    """
    Convert a (row, col) pair into a single board position index.

    Rows and columns run from 1 to N; each row occupies N + 1 slots.

    :param row: 1-based row index
    :param col: 1-based column index
    :param N: board dimension
    :return: flat board position
    """
    stride = N + 1
    return row * stride + col
def recursive(duration, J_prev, vol_prev, omega, alpha, beta):
    r"""
    GARCH recursive function to forecast the index (variance) process.

    .. math:: \sigma^2_t = \omega + \alpha \epsilon^2_{t-1} + \beta \sigma^2_{t-1}

    For the EWMA process, the parameters become omega = 0,
    alpha = 1 - lambda, beta = lambda:

    .. math:: \sigma^2_t = (1-\lambda) \epsilon^2_{t-1} + \lambda \sigma^2_{t-1}

    :param duration: number of forecast steps
    :param J_prev: last observed innovation (epsilon_{t-1})
    :param vol_prev: last variance value (sigma^2_{t-1})
    :param omega, alpha, beta: GARCH coefficients
    :return: list of ``duration`` forecast variance values

    NOTE(review): J_prev is held constant over the whole horizon, so the
    shock term never updates — TODO confirm this is intended rather than
    substituting E[eps^2] = sigma^2 after the first step.
    """
    # Raw docstring avoids the invalid \s / \e escape sequences the
    # original non-raw docstring contained; stray debug print removed.
    shock_term = alpha * J_prev ** 2  # loop-invariant: J_prev never changes
    vol_values = []
    for _ in range(duration):
        vol_prev = omega + shock_term + beta * vol_prev
        vol_values.append(vol_prev)
    return vol_values
def find_item(tgt_dict, key):
    """Recursively search a (possibly nested) dictionary for *key* and
    return the first matching value, or None when the key is absent.

    NOTE(review): a nested value that is literally None is treated the
    same as "not found", so the search continues past it.
    """
    if key in tgt_dict:
        return tgt_dict[key]
    for value in tgt_dict.values():
        if isinstance(value, dict):
            found = find_item(value, key)
            if found is not None:
                return found
    return None
def dict_get(inp, *subfields):
    """Find the value of the provided sequence of keys in the dictionary,
    if available.

    All items of ``subfields`` except the last are keys, ordered by level,
    to be followed from the topmost level of ``inp``; the last item is the
    default returned when any key along the path is missing.

    Args:
        inp (dict): the top-level dictionary. Unchanged on exit.
        subfields (str,object): keys ordered by level, then the default.

    Returns:
        The value reached by the key path when available, otherwise the
        default given as the last item of ``subfields``.

    Raises:
        ValueError: when fewer than two items are given in ``subfields``.
    """
    if len(subfields) <= 1:
        raise ValueError('invalid subfields, the sequence should be longer than one item as the last one is the value to be given')
    *path, default = subfields
    node = inp
    for key in path:
        node = node.get(key)
        if node is None:
            return default
    return node
def get_attribution(file_json):
    """Extract attribution info (project and institution @id values)
    from an embedded file response."""
    project_id = file_json['project']['@id']
    institution_id = file_json['institution']['@id']
    return {
        'project': project_id,
        'institution': institution_id,
    }
def build_api_link(service_name, callback_url):
    """Utility for building UDID.io API links."""
    base = 'https://get.udid.io/thirdparty/api/'
    query = 'callback=%s&service=%s&schemeurl=0' % (callback_url, service_name)
    return base + '?' + query
def _get_identifiers(header, data, fields): """See get_identifiers Returns ------- rv : dict map from field in <fields> to a set of identifiers """ rv = {} for field in fields: pipe_str = data[header[field]] id_value_pairs = pipe_str.split("|") for id_value_pair in id_value_pairs: # some identifiers have ":" in the value; but other dont like irogid try: i = id_value_pair.index(":") id = id_value_pair[0:i] value = id_value_pair[i+1:] if(id not in rv): rv[id] = set() rv[id].add(value) except ValueError: # then no ":" in the value and id_value_pair is just a value if(field not in rv): rv[field] = set() rv[field].add(id_value_pair) return rv
def odd_or_even(arr):
    """
    Given a list of numbers, determine whether the sum of its elements
    is odd or even.  An empty list sums to 0, which counts as even.

    :param arr: A list of numbers.
    :return: 'even' if the sum of the numbers is even, otherwise 'odd'.
    """
    total = sum(arr)
    if total % 2:
        return "odd"
    return "even"
def pred(lis):
    """
    Move the list element representing a relation (the first element)
    AFTER the relational term, dropping dummy semantic elements (``[]``)
    first.  Returns a new list; the input is not mutated.
    """
    kept = [element for element in lis if element != []]
    # Swap the first two surviving elements, keeping the rest in order.
    return [kept[1], kept[0], *kept[2:]]
def calculate_intensity_group(hfi: float) -> int:
    """ Returns a 1-5 integer value indicating Intensity Group based on HFI.
        Intensity groupings are:

        HFI             IG
        0-499           1
        500-999         2
        1000-1999       3
        2000-3999       4
        4000+           5
    """
    # Upper bound (exclusive) of each group, in ascending order.
    groupings = ((500, 1), (1000, 2), (2000, 3), (4000, 4))
    for upper_bound, intensity_group in groupings:
        if hfi < upper_bound:
            return intensity_group
    return 5
def make_response(response):
    """Serialize a response dictionary into the raw bytes of the message.

    The "status" value becomes the first line; every other key except
    "content" becomes a "key: value" header line.  The optional "content"
    value (str or bytes) is appended after the blank line, followed by a
    trailing CRLF CRLF.
    """
    header_lines = [response["status"]]
    for key, value in response.items():
        if key in ("status", "content"):
            continue
        header_lines.append(key + ": " + value)
    raw = ("\r\n".join(header_lines) + "\r\n\r\n").encode()
    if "content" in response:
        body = response["content"]
        if isinstance(body, str):
            body = body.encode()
        raw += body + b"\r\n\r\n"
    return raw
def check_port_in_port_range(expected_port: str, dest_port_range: str):
    """
    Check if a port is within a port range.

    ``dest_port_range`` may be '*' (matches anything), a single port like
    '8080', or an inclusive range like '8888-8889'.
    """
    if dest_port_range == '*':
        return True
    bounds = dest_port_range.split('-')
    if len(bounds) == 1:
        return int(bounds[0]) == int(expected_port)
    if len(bounds) == 2:
        return int(bounds[0]) <= int(expected_port) <= int(bounds[1])
    return False
def count_nice_hours(rows, threshold, time_interval):
    """
    Calculate the amount of hours that have a solar temperature over a
    specific threshold.

    It is assumed that all rows correspond with a single day and each row
    is measured in equal time differences specified by the time_interval
    parameter.

    :rows: the data extracted from the UVR-1611 csv file for one day
    :threshold: temperature threshold above which the solar panel is
        considered to be receiving energy from the sun
    :time_interval: the time between each measurement in seconds
    :return: the amount of sunny hours the day had
    """
    # Count the samples above threshold, then convert sample count to hours.
    above_threshold = sum(1 for reading in rows if reading > threshold)
    return above_threshold * time_interval / 3600
def get_mouse_pos(new_x_coord, new_y_coord):
    """
    Gets the updated mouse position.

    If the joystick returned to its default position (0, 0), mouse
    movement stops.  Otherwise the x axis passes through and the y axis
    is inverted (controller y grows downward relative to screen y).

    :param new_x_coord: The new x coordinate as reported by the controller
    :param new_y_coord: The new y coordinate as reported by the controller
    :return: ``(dx, dy)`` integer movement tuple
    """
    # The original inner "if coord == 0" branches assigned the same value
    # either way, so they were dead code; only the both-zero guard matters.
    if new_x_coord == 0 and new_y_coord == 0:
        return (0, 0)
    return (int(new_x_coord), int(-new_y_coord))
def div(func):
    """
    Divergence of the input Function.

    Returns ``func.div`` when the attribute exists, otherwise 0.

    Parameters
    ----------
    func : Function or TensorFunction
    """
    # getattr with a default is equivalent to try/except AttributeError.
    return getattr(func, 'div', 0)
def valid_provider(provider):
    """Determine if the given provider is supported and valid.

    Returns True if it is valid ('vmware' or 'virtualbox'),
    False otherwise.
    """
    # Direct membership test replaces the if/else returning True/False.
    return provider in ('vmware', 'virtualbox')
def html_attrs(context):
    """
    Adds a ``no-js`` class to the ``<html>`` tag.

    :param context: template context (currently unused; kept for the
        template-tag calling convention)
    :return: rendered attribute string, e.g. ``class="no-js"``

    todo: Make this much more flexible
    """
    # The original fetched context.get('request') into an unused local;
    # that dead lookup is removed.
    tags = [
        ('class', 'no-js')
    ]
    return ' '.join(['%s="%s"' % t for t in tags])
def is_ascii_chars(text):
    """
    Check whether *text* contains only ASCII ("latin") characters.
    Returns False when any non-ASCII character is present.
    """
    try:
        text.encode(encoding='utf-8').decode('ascii')
    except UnicodeDecodeError:
        return False
    return True
def remove_null_values(data):
    """
    Return a new dict with every None-valued entry removed.

    Arguments:
        data {dict}: data

    Returns:
        data {dict}: updated data (falsy-but-not-None values are kept)
    """
    cleaned = {}
    for key, value in data.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
# Translation table for legacy_html_escape, built once at import time so
# escaping is a single C-level pass instead of five sequential .replace()
# scans of the whole string.
_LEGACY_HTML_ESCAPES = str.maketrans({
    "&": "&amp;",
    ">": "&gt;",
    "<": "&lt;",
    '"': "&#34;",
    "'": "&#39;",
})


def legacy_html_escape(s):
    """legacy HTML escape for non-unicode mode.

    Escapes ``&``, ``<``, ``>``, ``"`` and ``'`` in one pass; output is
    identical to the previous chained-replace implementation.
    """
    return s.translate(_LEGACY_HTML_ESCAPES)