content
stringlengths
42
6.51k
def SplitStringAtSeparator(string, sep):
    """Split *string* at *sep* and strip whitespace from each piece.

    :param str string: string to split
    :param str sep: separator character
    :return: list of stripped substrings (empty pieces are kept,
        matching str.split's behavior)
    """
    # The original docstring said "at commas" but the code splits at `sep`;
    # the manual append loop is replaced by an equivalent comprehension.
    return [piece.strip() for piece in string.split(sep)]
def getGoogleLink(addr):
    """Return a Google Maps search link for the given address.

    The address is percent-encoded so spaces and special characters
    produce a valid URL (the original concatenated it raw).
    """
    from urllib.parse import quote_plus  # local import keeps module deps unchanged
    return "http://maps.google.com/?q=" + quote_plus(addr) + "&sensor=true"
def blend_components(original_component, tint_component, tint_strength):
    """Blend two color components (ints in [0, 255]) by *tint_strength* percent.

    Uses the perceptual square-blend formula from
    http://stackoverflow.com/a/29321264
    """
    weight = tint_strength / 100.
    blended_sq = (1 - weight) * original_component ** 2 + weight * tint_component ** 2
    return round(blended_sq ** 0.5)
def ProcessPsiInfo(PhiPsiInfo, Precision):
    """Round phi/psi angles to *Precision* decimals for scatter plots.

    Returns two parallel lists of floats: (phi angles, psi angles).
    """
    PhiAngles, PsiAngles = [], []
    for ResNum in PhiPsiInfo["ResNums"]:
        # Format-then-parse reproduces printf rounding exactly.
        PhiAngles.append(float("%.*f" % (Precision, PhiPsiInfo["Phi"][ResNum])))
        PsiAngles.append(float("%.*f" % (Precision, PhiPsiInfo["Psi"][ResNum])))
    return PhiAngles, PsiAngles
def sort_gtf(gtf_dict):
    """Sort each transcript's exon list in place (TALON emits them unsorted).

    :param gtf_dict: maps transcript_id -> list of (exon_start, exon_end) tuples
    :return: the same dict with every exon list sorted
    """
    for transcript_id in gtf_dict:
        gtf_dict[transcript_id] = sorted(gtf_dict[transcript_id])
    return gtf_dict
def get_mean_3(x, y, z):
    """Return the arithmetic mean of the three values."""
    total = x + y + z
    return total / 3
def fibonacci(n):
    """Return the n-th Fibonacci number F_n.

    The Fibonacci sequence starts 0, 1, 1, 2, 3, 5, 8, ..., and is
    defined as F_n = F_{n-1} + F_{n-2}.

    >>> fibonacci(0)
    0
    >>> fibonacci(5)
    5
    >>> fibonacci(10)
    55
    """
    # Two running values instead of materializing the whole sequence:
    # O(1) space versus the original's O(n) list.
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def binary_search(input_array, value):
    """Return the index of *value* in sorted *input_array*, or -1 if absent."""
    low = 0
    high = len(input_array) - 1
    mid = (high - low) // 2
    while low <= high:
        current = input_array[mid]
        if current == value:
            return mid
        if value > current:
            low = mid + 1
        else:
            high = mid - 1
        mid = low + (high - low) // 2
    return -1
def test_tiff(h, f): """TIFF (can be in Motorola or Intel byte order)""" if h[:2] in ('MM', 'II'): return 'tiff'
def simplified_pos(pos, tagset=None, default=''):
    """Return a simplified POS tag for a full POS tag `pos` from `tagset`.

    Default (WordNet/STTS/unknown) conversion:
    - N.../V... (noun/verb) tags -> 'N'/'V'
    - ADJ.../ADV... (adjective/adverb) tags -> 'ADJ'/'ADV'
    - anything else -> `default`

    With ``tagset == 'penn'``:
    - N.../V... tags -> 'N'/'V'
    - JJ... (adjective) -> 'ADJ', RB... (adverb) -> 'ADV'
    - anything else -> `default`

    :param pos: a POS tag
    :param tagset: the tagset used for `pos`
    :param default: value returned when the tag cannot be simplified
    :return: simplified tag
    """
    # Nouns/verbs simplify the same way in every supported tagset.
    if pos.startswith(('N', 'V')):
        return pos[0]
    if tagset == 'penn':
        if pos.startswith('JJ'):
            return 'ADJ'
        if pos.startswith('RB'):
            return 'ADV'
        return default
    if pos.startswith(('ADJ', 'ADV')):
        return pos[:3]
    return default
def make_sentiment(value):
    """Return a sentiment, which represents a value that may not exist.

    >>> positive = make_sentiment(0.2)
    >>> neutral = make_sentiment(0)
    >>> unknown = make_sentiment(None)
    >>> has_sentiment(positive)
    True
    >>> has_sentiment(neutral)
    True
    >>> has_sentiment(unknown)
    False
    >>> sentiment_value(positive)
    0.2
    >>> sentiment_value(neutral)
    0
    """
    assert value is None or -1 <= value <= 1, 'Illegal value'
    # A sentiment is a (present?, value) pair.
    return (value is not None, value)
def bytes_to_int(input_bytes, signed=False):
    """Convert a little-endian byte sequence to an integer.

    Arguments:
        input_bytes -- bytes-like object (or iterable producing bytes)
            to convert.

    Keyword Arguments:
        signed {bool} -- interpret the bytes as two's complement
            (default: {False})

    Returns:
        [integer] -- the decoded integer value.
    """
    order = 'little'
    return int.from_bytes(input_bytes, byteorder=order, signed=signed)
def UniqueFrozenset(seq):
    """Build a C{frozenset} from C{seq}, rejecting duplicate elements.

    @raise ValueError: When there are duplicate elements
    """
    items = seq if isinstance(seq, (list, tuple)) else list(seq)
    unique = frozenset(items)
    # Any duplicate collapses in the frozenset, shrinking its size.
    if len(unique) != len(items):
        raise ValueError("Duplicate values found")
    return unique
def jsonize_phrase_set(phrase_set, term_to_str_fn=lambda tpl: ' '.join(tpl)):
    """Convert a set of string tuples to a list of strings.

    Each tuple is rendered via *term_to_str_fn* (space-joined by default);
    a falsy function means "return the elements unchanged, as a list".
    """
    if not term_to_str_fn:
        return list(phrase_set)
    return [term_to_str_fn(phrase) for phrase in phrase_set]
def deltax(firstx, lastx, npoints, **kwargs):
    """-> (lastx - firstx) / (npoints - 1)"""
    span = lastx - firstx
    return span / (npoints - 1)
def all_relevant_registers_filled(registers, relevant_registers):
    """
    @param registers : a dictionary of each register content { 'register #': 'value' }
    @param relevant_registers : an ordered list of register indexes related to that method call
    @rtype : True if all the relevant_registers are filled, False if not
    """
    # all() short-circuits on the first empty/missing register instead of
    # scanning the whole list like the original loop.  A register counts as
    # "filled" when it exists and its value is non-empty (the original
    # treated empty values from "move-result" as unfilled).
    return all(i in registers and len(registers[i]) >= 1
               for i in relevant_registers)
def calculate_kill_death_ratio(kills, deaths):
    """Return kills / deaths rounded to 2 decimals.

    Players with zero deaths get their raw kill count instead, so the
    ratio is always defined.
    """
    kdr = kills / deaths if deaths > 0 else kills
    return round(kdr, 2)
def lerp(value1, value2, fac):
    """Linear interpolation: blend *value1* toward *value2* by factor *fac*."""
    weight = 1 - fac
    return value1 * weight + value2 * fac
def combine_nested_dict_keys(d: dict, delimiter: str = ".") -> list:
    """Flatten nested dictionary keys into delimiter-joined path strings.

    Args:
        d (dict): (possibly nested) dict
        delimiter (str): separator placed between nesting levels

    Returns:
        list: flattened key paths
    """
    results = []
    for key, value in d.items():
        if isinstance(value, dict):
            # Bug fix: propagate the caller's delimiter into the recursion —
            # the original always recursed with the default ".".
            for suffix in combine_nested_dict_keys(value, delimiter=delimiter):
                results.append(key + delimiter + suffix)
        else:
            results.append(key)
    return results
def convert_lanes_to_edges(lanes):
    """
    Convert lanes (iterable) to edges (iterable).
    Remove lane index from the end and then remove duplicates while retaining order.
    Also works with single lane str.

    >>> convert_lanes_to_edges(['1175109_0', '1175109_1', '1183934_0'])
    ['1175109', '1183934']
    """
    def strip_index(lane):
        # '1175109_0' -> '1175109'
        return lane.rsplit('_', 1)[0]

    if isinstance(lanes, str):
        return strip_index(lanes)
    # dict preserves insertion order, giving ordered de-duplication.
    return list(dict.fromkeys(strip_index(lane) for lane in lanes))
def _md_json_matcher(art): """ Is this artifact the metadata.json file? """ return art['name'].__contains__('metadata') & art['name'].endswith('.json')
def get_frame(name):
    """Return a log stack-frame stub: (name, None, None, None)."""
    frame = (name,) + (None,) * 3
    return frame
def unescape(s):
    """Decode backslash escape sequences in *s*.

    Based on http://stackoverflow.com/a/24519338/2003487
    """
    import codecs
    import re

    pattern = re.compile(r'''
        ( \\U........      # 8-digit hex escapes
        | \\u....          # 4-digit hex escapes
        | \\x..            # 2-digit hex escapes
        | \\[0-7]{1,3}     # Octal escapes
        | \\N\{[^}]+\}     # Unicode characters by name
        | \\[\\'"abfnrtv]  # Single-character escapes
        )''', re.UNICODE | re.VERBOSE)

    return pattern.sub(
        lambda match: codecs.decode(match.group(0), 'unicode-escape'), s)
def _GetVariableName(name): """Get variable-formatted name.""" name = name.replace('\'', '').replace('.', '').replace('/', '') name = '_'.join([n.lower() for n in name.split(' ')]) return name
def element_add(a, b):
    """Elementwise sum of two tuples."""
    paired = zip(a, b)
    return tuple(left + right for left, right in paired)
def normalize_constraint(c):
    """Normalize a claripy constraint AST for stable comparison.

    Recursively normalizes child arguments, then — for the commutative
    operators ``__eq__``/``__ne__`` only — rebuilds the node with its
    arguments in a canonical order (most free variables first, hash as
    tie-break, descending).

    :param c:
    :type c: claripy.ast.Base
    :return: the normalized constraint, or *c* unchanged when it is not
        an AST-like object
    """
    # Duck-type check: anything without op/args/length is not a claripy AST.
    if not hasattr(c, 'op') or not hasattr(c, 'args') or not hasattr(c, 'length'):
        return c
    normalized_args = [normalize_constraint(arg) for arg in c.args]
    if c.op in ['__eq__', '__ne__']:
        # reverse=True: arguments with more variables (then larger hash) come first.
        sorted_args = list(sorted(normalized_args, key=lambda v: (len(v.variables), hash(v)), reverse=True))
        return c.make_like(c.op, sorted_args)
    else:
        # NOTE(review): this branch rebuilds from the ORIGINAL c.args, so the
        # recursively normalized children computed above are discarded for
        # non-eq ops — possibly intentional (only eq/ne argument order is
        # canonicalized), but worth confirming against callers.
        return c.make_like(c.op, c.args)
def make_train_dict(input_ids, attention_masks, labels):
    """Bundle model inputs into the dict layout expected downstream."""
    return dict(input_ids=input_ids,
                attention_mask=attention_masks,
                labels=labels)
def NumberFromDigits(*args):
    """Given an array of Int digits, returns an Int as if they were read left-to-right"""
    total = 0
    for digit in args:
        total = total * 10 + digit
    return total
def is_valid_entity(entity):
    """An entity is correctly formatted when it is a plain string."""
    valid = isinstance(entity, str)
    return valid
def parse_version(v):
    """Parse a version string into its component parts.

    String is expected to follow the pattern "0.1.1.dev0", which would be
    parsed into (0, 1, 1, 0).  The first two components are required; the
    third defaults to 0 when missing and the dev component to None.

    Parameters
    ----------
    v : str
        Version string using the syntax described above.

    Returns
    -------
    tuple with format (int, int, int, int or None)
    """
    dev = None
    if 'dev' in v:
        head, _, tail = v.partition('dev')
        dev = int(tail)
        v = head  # e.g. "0.1.dev0" -> "0.1."
    if v.endswith('.'):
        v = v[:-1]  # "0.1." -> "0.1"
    parts = v.split('.')
    patch = int(parts[2]) if len(parts) == 3 else 0
    return (int(parts[0]), int(parts[1]), patch, dev)
def getIncomparable(rs1, rs2):
    """
    Return the set of problem IDs which are only solved by rs1.

    :param rs1: result set of the 1st approach (dict keyed by problem ID)
    :param rs2: result set of the 2nd approach (dict keyed by problem ID)
    :return: a set of problem IDs
    """
    # rs2.get(): a pid entirely missing from rs2 now counts as incomparable
    # instead of raising KeyError like the original rs2[pid] lookup did.
    return {pid for pid in rs1 if rs2.get(pid) is None}
def calculate(power):
    """Return the sum of the decimal digits of 2 raised to *power*."""
    digits = str(2 ** power)
    return sum(int(ch) for ch in digits)
def rgb_to_ansi(r, g, b):
    """Convert an RGB color to the nearest 256-color ANSI code."""
    r, g, b = int(r), int(g), int(b)
    # Grayscale ramp (codes 232-255).  The original wrote `r == g & g == b`,
    # which only works by accident of precedence (it parses as the chained
    # comparison r == (g & g) == b); write the intended chain explicitly.
    if r == g == b:
        if r < 8:
            return 16
        if r > 248:
            return 230
        return int(round(((r - 8) / 247) * 24) + 232)
    # Map each channel into the 6x6x6 ANSI color cube (codes 16-231).
    r_idx = int(round(float(r) / 51))
    g_idx = int(round(float(g) / 51))
    b_idx = int(round(float(b) / 51))
    return int(16 + 36 * r_idx + 6 * g_idx + b_idx)
def orthoProj(pointA, pointB, S):
    """
    Orthogonal projection of a point on a line.

    Parameters
    ----------
    pointA : Tuple
        Point to project on the line.
    pointB : Tuple
        Point belonging to the line.
    S : Tuple
        Slope (direction vector) of the line.

    Returns
    -------
    projection : Tuple
        Projection of pointA on the line through pointB with direction S.
    """
    norm = (S[0] ** 2 + S[1] ** 2) ** .5
    # Signed length of vector B->A projected onto the normalized direction.
    along = ((pointA[0] - pointB[0]) * S[0] + (pointA[1] - pointB[1]) * S[1]) / norm
    scale = along / norm
    return (pointB[0] + scale * S[0], pointB[1] + scale * S[1])
def gety(x, a, b, c):
    """Solve the line equation ax + by + c = 0 for y at the given x."""
    numerator = -a * x - c
    return numerator / b
def general_setup_validator(values):
    """
    Validate/normalize general setup values in place.

    :param values: Dictionary of input values (mutated by this function).
    :return: String to be viewed in UI to user.
    """
    messages = []
    if values['sys_v_ipc'] is True:
        values['sys_v_ipc'] = False
        messages.append("Changed SYS_V_IPC to False. ")
    if int(values['kernel_log_buffer']) > 32:
        values['kernel_log_buffer'] = 1
        messages.append("Changed kernel_log_buffer to 1, because it was larger than 32.")
    if not str(values['cpu_kernel_log_buffer']).isdigit():
        values['cpu_kernel_log_buffer'] = 64
        messages.append("Changed cpu_kernel_log_buffer to 64 because it was not number.")
    return ''.join(messages)
def fix_date_elements_folder_path(partitions, types):
    """Replace the '-' separator in time-dimension partitions with '/' so
    they can be used in folder paths; other partitions pass through."""
    # (renamed loop var: the original shadowed the builtin `type`)
    return [part.replace("-", "/") if kind == "time" else part
            for part, kind in zip(partitions, types)]
def penn_to_wn(tag):
    """Convert a Penn Treebank tag to a simplified WordNet tag (or None)."""
    for prefix, wn_tag in (('N', 'n'), ('V', 'v'), ('J', 'a'), ('R', 'r')):
        if tag.startswith(prefix):
            return wn_tag
    return None
def __find_ind(d, ind, pos): """ Return fields from arg1 with value arg2 in indicator position arg3 (1 or 2). """ if ind is None: return d return tuple([f for f in d if f[pos] == ind])
def check_mode_python(mode: str) -> bool:
    """
    Checks whether a script should be read respecting Python syntax or
    traditional shell syntax based on the syntax variable in the script.

    Returns True if Python is specified, False for 'shell'/'sh'/'bash',
    and raises TypeError for other cases.
    """
    normalized = mode.lower()  # hoisted: the original lowered twice
    if normalized == "python":
        return True
    if normalized in {"shell", "sh", "bash"}:
        return False
    raise TypeError(f"invalid syntax type specified: {mode}")
def parse_cu_time(cu_time):
    """
    Parse times given in hhmm or hmm format into the hh:mm format used
    by Hyperschedule.
    """
    hours = int(cu_time[:-2])
    minutes = int(cu_time[-2:])
    return "{:02d}:{:02d}".format(hours, minutes)
def trec_format(results, query_id, run_id):
    """Format `results` ({doc_id: score}) in TREC run-file format.

    Documents are ranked by descending score, one tab-separated line per
    document: query_id Q0 doc_id rank score run_id.
    """
    ranked = sorted(results.items(), key=lambda item: item[1], reverse=True)
    # join() builds the output in one pass instead of the original's
    # quadratic `out_str +=` loop; enumerate replaces manual rank counting.
    return "".join(
        "{}\tQ0\t{}\t{}\t{}\t{}\n".format(query_id, doc_id, rank, score, run_id)
        for rank, (doc_id, score) in enumerate(ranked, start=1)
    )
def stats_file_keys(gene_number):
    """Return the ordered field names of the stats file as a list of strings."""
    keys = ['popsize', 'genenumber', 'generationnumber', 'diversity']
    keys += ['viabilityratio' + str(i) for i in range(gene_number)]
    keys += ['viabilityratioDB' + str(i) for i in range(gene_number)]
    return keys
def levenshtein(a, b):
    """Calculate the Levenshtein distance between a and b.

    Two-row dynamic programming; adapted from
    http://hetland.org/coding/python/levenshtein.py
    """
    # Keep the shorter string in `a` so rows use O(min(n, m)) space.
    if len(a) > len(b):
        a, b = b, a
    short_len = len(a)
    row = list(range(short_len + 1))
    for i in range(1, len(b) + 1):
        prev_row, row = row, [i] + [0] * short_len
        for j in range(1, short_len + 1):
            insert_cost = row[j - 1] + 1
            delete_cost = prev_row[j] + 1
            # Substitution is free when the characters match.
            subst_cost = prev_row[j - 1] + (a[j - 1] != b[i - 1])
            row[j] = min(insert_cost, delete_cost, subst_cost)
    return row[short_len]
def parity(byte):
    """Return True if the number of bits that are set is even."""
    # bin() already returns a str — the original's str(bin(byte)) was redundant.
    return bin(byte).count('1') % 2 == 0
def readable_data_size(num):
    """Format a byte count using the largest unit that keeps it below 1024."""
    for unit in ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB'):
        if num < 1024.0:
            return f'{num:.2f} {unit}'
        num /= 1024.0
    # Anything still >= 1024 ZB falls through to yottabytes.
    return f'{num:.2f} YB'
def a_criterions(x: tuple, center: tuple, old_len, new_len):
    """(s(k)/(s(k)-1))*d^2[x, c(k)] — cluster-size ratio times squared
    Euclidean distance from *x* to *center*."""
    ratio = old_len / new_len
    dist_sq = (x[0] - center[0]) ** 2 + (x[1] - center[1]) ** 2
    return ratio * dist_sq
def expLevel(level='3'):
    """
    Build the LinkedIn experience-level query parameter.

    Accepts a space-separated string of level numbers, or a list of
    number strings.  Ex: expLevel(['2', '3']) or expLevel('6').
    (The original crashed on list input despite documenting it, and its
    for-loop returned on the first iteration.)

    1 = Internship
    2 = Entry Level
    3 = Associate
    4 = Mid-Senior
    5 = Director
    6 = Executive
    """
    prefix = 'f_E='
    if isinstance(level, str):
        level = level.split()
    # '%2C' is the URL-encoded comma separating multiple levels.
    return prefix + '%2C'.join(level)
def parse_exr_admin_show_platform(output):
    """Map RSP/RP/LC node names to card types from 'admin show platform' output."""
    inventory = dict()
    for raw_line in output.split('\n'):
        stripped = raw_line.strip()
        # Data rows start with a numeric node id; headers/blank lines do not.
        if stripped and stripped[0].isdigit():
            node = stripped[:10].strip()          # fixed-width node column
            node_type = stripped[10:34].strip()   # fixed-width card-type column
            inventory[node] = node_type
    return inventory
def tokenize_bibtex(entry):
    """
    Tokenize bibtex entry string

    Args:
        entry(str): string of a bibtex entry

    Returns:
        dict: the bibtex entry — field name (lower-cased) -> value,
        plus 'key' for the citation key
    """
    # Scanning starts just past the opening '{' of "@type{key, field=..., }".
    start= entry.find('{') + 1
    token_start= start
    quote_level= 0  # parity of '"' seen so far (odd == inside quotes)
    brace_level= 0  # brace nesting relative to the entry's outer braces
    tokens= []
    for i in range(start, len(entry)):
        if entry[i] == '"':
            quote_level= quote_level + 1
        if entry[i] == '{':
            brace_level= brace_level + 1
        if entry[i] == '}':
            brace_level= brace_level - 1
        # A token ends at a top-level ',' or at the entry's closing '}'.
        # NOTE(review): `and` binds tighter than `or`, so the quote_level
        # guard applies only to the closing-'}' case, not the ',' case —
        # confirm this matches the intended quoting rules.
        if (entry[i] == ',' and brace_level == 0) or (entry[i] == '}' and brace_level < 0) and quote_level % 2 == 0:
            tokens.append(entry[token_start:(i)])
            token_start= i + 1
    result= {}
    result['key']= tokens[0].strip()  # first token is the citation key
    for i in range(1, len(tokens)):
        # Remaining tokens look like 'name = {value}' or 'name = "value"'.
        splitted= tokens[i].strip().split('=', 1)
        if len(splitted) == 2:
            key= splitted[0].strip().lower()
            value= splitted[1].strip()[1:-1]  # drop surrounding braces/quotes
            result[key]= value
    if 'year' not in result:
        print("No year attribute in %s" % result['key'])
    return result
def get_months_list(month_num=False):
    """Return the list of month names, or one title-cased name.

    :param month_num: when a 1-based month number is given, return just
        that month's name title-cased; otherwise return the whole list
        of lower-case names.
    """
    # Bug fix: the original list stopped at June, so July-December could
    # never be returned (and month_num 7-12 raised IndexError).
    month_names = ["january", "february", "march", "april", "may", "june",
                   "july", "august", "september", "october", "november",
                   "december"]
    if month_num:
        return month_names[month_num - 1].title()
    return month_names
def int_to_little_endian(n: int, length) -> bytes:
    """Encode integer *n* as a *length*-byte little-endian sequence."""
    order = 'little'
    return n.to_bytes(length, order)
def split_line_by_str(line):
    """Split *line* at the boundaries of single-quoted characters,
    double-quoted strings, and '//' line comments.

    Returns the non-empty, stripped slices of *line* in order, so each
    quoted literal (and any trailing comment) lands in its own slice.
    """
    indeces = [0]  # slice start boundaries; always begins at column 0
    in_single_quoted_character = False
    in_double_quoted_string = False
    in_single_line_comment = False
    for index, char in enumerate(line):
        in_str = (in_single_quoted_character or in_double_quoted_string or in_single_line_comment)
        if in_str:
            # Inside a literal: only an unescaped matching quote closes it.
            # (A '//' comment never closes, so it runs to end of line.)
            if index - 1 >= 0:
                pre_char = line[index - 1]
                if (char == '\'' and pre_char != '\\' and in_single_quoted_character):
                    in_single_quoted_character = False
                    indeces.append(index + 1)  # boundary just AFTER the closing quote
                elif (char == '"' and pre_char != '\\' and in_double_quoted_string):
                    in_double_quoted_string = False
                    indeces.append(index + 1)
        else:
            # Outside any literal: an opening quote or '//' starts a new slice.
            if char == '\'':
                in_single_quoted_character = True
                indeces.append(index)
            elif char == '"':
                in_double_quoted_string = True
                indeces.append(index)
            elif char == '/' and index + 1 < len(line):
                next_char = line[index + 1]
                if next_char == '/':
                    in_single_line_comment = True
                    indeces.append(index)
    indeces.append(len(line))
    # Pair consecutive boundaries into (start, end) slices.
    start_indeces = indeces[:-1]
    end_indeces = indeces[1:]
    index_pairs = zip(start_indeces, end_indeces)
    line_slices = []
    for index_pair in index_pairs:
        line_slice = line[index_pair[0]:index_pair[1]].strip()
        if line_slice:  # drop empty / whitespace-only slices
            line_slices.append(line_slice)
    return line_slices
def eq5(A, B, C, D, E, T):
    """Chemsep equation 5: A + B*T + C*T^2 + D*T^3 + E*T^4.

    :param A: Equation parameter A
    :param B: Equation parameter B
    :param C: Equation parameter C
    :param D: Equation parameter D
    :param E: Equation parameter E
    :param T: Temperature in K
    """
    result = A
    for power, coeff in enumerate((B, C, D, E), start=1):
        result += coeff * T ** power
    return result
def validate_either_values(input_dict, either_values, verbose=False, throw=False):
    """Check that exactly one of *either_values*' keys has a truthy value
    in *input_dict*.

    With throw=True a violation raises ValueError; with verbose=True the
    violation message is printed.  Returns True only when exactly one key
    is set.
    """
    which_set = [key for key in either_values if input_dict.get(key)]
    if not which_set:
        msg = "Neither: {} values were present in: {}".format(
            either_values.keys(), input_dict
        )
        if verbose:
            print(msg)
        if throw:
            raise ValueError(msg)
        return False
    if len(which_set) > 1:
        msg = "Only one of: {} can be used, but: {} were set".format(
            either_values.keys(), which_set
        )
        if verbose:
            print(msg)
        if throw:
            raise ValueError(msg)
        return False
    return True
def porosity_by_soniclog(delt_log=144, delt_matrix=55.5, delt_fluid=189):
    """Sonic-log (Wyllie time-average) porosity.

    Porosity = (dt_log - dt_matrix) / (dt_fluid - dt_matrix), with all
    delta-t values in micro-sec/ft.  The fluid is usually mud filtrate
    (189 micro-sec/ft).  Matrix values: sandstone ~55.5 or 51.0,
    limestone ~47.5, dolomite ~43.5.
    """
    numerator = delt_log - delt_matrix
    denominator = delt_fluid - delt_matrix
    return numerator / denominator
def method_call_if_def(obj, attr_name, m_name, default, *args, **kwargs):
    """Call ``obj.attr_name.m_name(*args, **kwargs)`` when *attr_name*
    exists on *obj*; return *default* when it does not.
    """
    try:
        attr = getattr(obj, attr_name)
    except AttributeError:
        return default
    # Outside the try: a missing m_name should propagate, as in the original.
    method = getattr(attr, m_name)
    return method(*args, **kwargs)
def parse_rsync_url(location):
    """Parse a rsync-style URL into a (user, host, path) tuple.

    Raises ValueError when *location* contains no ':' separator.
    """
    if ':' not in location:
        raise ValueError('not a valid rsync-style URL')
    prefix, path = location.split(':', 1)
    if '@' in prefix:
        # SSH with user@host:path; rsplit tolerates '@' inside the user part.
        user, host = prefix.rsplit('@', 1)
    else:
        # SSH with no user@, zero or one leading slash.
        user = None
        host = prefix
    return (user, host, path)
def get_sender(msg):
    """Extract the sender nick from an IRC-style prefix ':nick!user@host ...'."""
    prefix = msg.split('!', 1)[0]
    return prefix[1:]  # drop the leading ':'
def celsius_para_fahrenheit(c):
    """Convert a temperature from degrees Celsius to Fahrenheit."""
    return c * 9 / 5 + 32
def is_DER_sig(sig: bytes) -> bool:
    """Checks if the data is a DER encoded ECDSA signature
    https://bitcoin.stackexchange.com/questions/92680/what-are-the-der-signature-and-sec-format

    :param sig: Potential signature.
    :type sig: bytes
    :return: True if the passed in bytes are an ECDSA signature in DER format.
    :rtype: bool
    """
    # DER sequence header byte
    if sig[0] != 0x30:
        return False
    # Header byte for the r big integer
    if sig[2] != 0x02:
        return False
    len_r = sig[3]
    # Header byte for the s big integer
    if sig[3 + len_r + 1] != 0x02:
        return False
    len_s = sig[3 + len_r + 2]
    # The last extra byte is the sighash flag.
    computed_len = 4 + len_r + 2 + len_s + 1
    # Bug fix: the original evaluated the bare expression `False` here
    # without returning it, so wrong-length signatures were accepted.
    if len(sig) != computed_len:
        return False
    return True
def show_fact_sheet_b(responses, derived):
    """
    If any child lives with both parents, custody is shared, so Fact
    Sheet B is indicated.
    """
    for child in derived['children']:
        if child['child_live_with'] == 'Lives with both':
            return True
    return False
def get_bonds(struct):
    """Get a list of bonds in a dot-paren structure."""
    struct = struct.replace("+", "")  # positions do NOT include strand breaks
    bonds = []       # list of (open_position, close_position) pairs
    open_stack = []  # LIFO of unmatched '(' positions
    for pos, symb in enumerate(struct):
        if symb == "(":
            open_stack.append(pos)
        elif symb == ")":
            assert len(open_stack) != 0
            # Pair this ')' with the most recent unmatched '('.
            bonds.append((open_stack.pop(), pos))
        else:
            assert symb == ".", "Structure '%s' not in dot-paren form" % struct
    return bonds
def _compute_ANN_row(tree, x): """ For a given survival tree and a feature vector, traverse down the tree to find the feature vector's adaptive nearest neighbors. Parameters ---------- tree : dictionary Tree node of a decision tree. We traverse down the tree taking branches that depend on the given feature vector's values. x : 1D numpy array, shape = [n_features] Feature vector. Returns ------- output : 1D numpy array Training subject indices that are the adaptive nearest neighbors of the input feature vector. """ if 'train_indices' in tree: return tree['train_indices'] if x[tree['feature']] <= tree['threshold']: return _compute_ANN_row(tree['left'], x) else: return _compute_ANN_row(tree['right'], x)
def within(x, y, size, px, py):
    """Return True if point (px, py) lies within the square
    (x, y) .. (x+size, y+size), edges inclusive."""
    inside_x = x <= px <= x + size
    inside_y = y <= py <= y + size
    return inside_x and inside_y
def convert_label(indicator):
    """
    Convert binary class labels to +1/-1

    :param indicator: target class (1 means positive)
    :return: +1/-1
    """
    return 1 if indicator == 1 else -1
def bool_to_str(x):
    """Return '1' for truthy *x* and the empty string for falsy *x*."""
    if x:
        return '1'
    return ''
def get_or_put(list_, value):
    """Insert *value* into *list_* if absent; return its index."""
    if value in list_:
        return list_.index(value)
    list_.append(value)
    return len(list_) - 1
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this
    set, the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {'dotencode', 'fncache', 'generaldelta', 'revlogv1', 'store'}
    return supported
def write_file(filename="", text=""):
    """Write *text* to a UTF-8 text file and return the number of
    characters written."""
    with open(filename, "w", encoding="utf-8") as handle:
        handle.write(text)
    return len(text)
def HexToByte(hexStr):
    """Convert a string of hex byte values into a byte string.

    The hex byte values may or may not be space separated.
    """
    compact = ''.join(hexStr.split(" "))
    # Two hex digits per character.
    chars = [chr(int(compact[i:i + 2], 16)) for i in range(0, len(compact), 2)]
    return ''.join(chars)
def bitString(integral_value, min_bits=8):
    """
    Return the binary-string representation of a value, padded with
    zeros on the left to at least *min_bits* digits.

    @param integral_value: integer or long integer to convert to bit string
    @type integral_value: int
    @param min_bits: minimum number of bits to display in the result
    @type min_bits: int
    @return: ascii string containing the binary representation of the input
    @rtype: str
    """
    bits = bin(integral_value).split('b')[1]
    # rjust is a no-op when the string is already long enough.
    return bits.rjust(min_bits, '0')
def motifs_from_proto(motifs, is_one_dimensional=False):
    """
    Utility function to transform protobuf motif locations back to
    single- or multi-dimension locations.

    Parameter
    ---------
    motifs : array_like
        The protobuf formatted array.
    is_one_dimensional : boolean
        A flag to indicate if the original locations should be 1D.

    Returns
    -------
    list :
        The transformed motif locations.
    """
    def to_location(loc):
        # Row-only for 1D series, (row, col) otherwise.
        return loc.row if is_one_dimensional else (loc.row, loc.col)

    out = []
    for motif in motifs:
        out.append({
            'motifs': [to_location(loc) for loc in motif.motifs],
            'neighbors': [to_location(loc) for loc in motif.neighbors],
        })
    return out
def format_number_for_display(number):
    """Render a float as a Spanish-formatted string ('.' thousands, ','
    decimal), dropping a ',0'/',00' zero-decimal tail."""
    # Swap '.' and ',' via a placeholder so the replacements don't collide.
    text = str(number).replace(".", "!").replace(",", ".").replace("!", ",")
    for zero_tail in (",0", ",00"):
        if text.endswith(zero_tail):
            return text.replace(zero_tail, "")
    return text
def _expand_global_features(B, T, g, bct=True): """Expand global conditioning features to all time steps Args: B (int): Batch size. T (int): Time length. g (Variable): Global features, (B x C) or (B x C x 1). bct (bool) : returns (B x C x T) if True, otherwise (B x T x C) Returns: Variable: B x C x T or B x T x C or None """ if g is None: return None g = g.unsqueeze(-1) if g.dim() == 2 else g if bct: g_bct = g.expand(B, -1, T) return g_bct.contiguous() else: g_btc = g.expand(B, -1, T).transpose(1, 2) return g_btc.contiguous()
def clean_adduct(adduct, add_brackets=False):
    """
    Harmonizes adducts.

    Canonical form: M, then alphabetically sorted losses, then sorted
    additions, then the trailing charge (digits + sign).

    :param adduct: adduct string, e.g. "[M+H]+" or "M+Na-H2O"
    :param add_brackets: add [M+H]+ brackets that are removed during clean up (True or False)
    :return: M-all losses+all additions CHARGE
    """
    new_adduct = adduct
    # Strip brackets and whitespace first.
    new_adduct = new_adduct.replace("[", "")
    new_adduct = new_adduct.replace("]", "")
    new_adduct = new_adduct.replace(" ", "")
    charge = ""
    charge_sign = ""
    # Walk backwards collecting the trailing charge: digits accumulate into
    # `charge`, the last-seen +/- becomes `charge_sign`.  The first other
    # character ends the scan and truncates the adduct body at that point.
    for i in reversed(range(len(new_adduct))):
        if new_adduct[i] == "+" or new_adduct[i] == "-":
            charge_sign = new_adduct[i]
        elif new_adduct[i].isdigit():
            charge = new_adduct[i] + charge
        else:
            new_adduct = new_adduct[0:i + 1]
            break
    # Split the remaining body into the M part plus additions (+) and
    # losses (-): each '+'-separated chunk may carry '-'-separated losses.
    parts = new_adduct.split("+")
    positive_parts = []
    negative_parts = []
    for p in parts:
        sp = p.split("-")
        positive_parts.append(sp[0])
        for n in sp[1:]:
            negative_parts.append(n)
    # sort: the first positive chunk is the M part and keeps its place.
    m_part = positive_parts[0]
    positive_parts = positive_parts[1:]
    positive_parts.sort()
    negative_parts.sort()
    new_adduct = m_part
    if len(negative_parts) > 0:
        new_adduct += "-" + "-".join(negative_parts)
    if len(positive_parts) > 0:
        new_adduct += "+" + "+".join(positive_parts)
    if add_brackets:
        new_adduct = "[" + new_adduct + "]"
    # Re-attach the charge outside any brackets.
    new_adduct += charge + charge_sign
    return new_adduct
def get_signature(user_agent, query_string, client_ip_address, lang):
    """
    Get cache signature based on `user_agent`, `query_string`,
    `client_ip_address` and `lang`.

    Return `None` if the query should not be cached (its location part
    contains a ':' once the http(s) scheme is stripped).
    """
    if "?" in query_string:
        location = query_string.split("?", 1)[0]
    else:
        location = query_string
    # Strip the scheme so the ':' check below only sees host[:port]/path.
    if location.startswith("http://"):
        location = location[7:]
    elif location.startswith("https://"):
        location = location[8:]
    if ":" in location:
        return None
    # Bug fix: dropped the leftover debug print() that leaked every
    # signature (including client IP addresses) to stdout.
    return "%s:%s:%s:%s" % (user_agent, query_string, client_ip_address, lang)
def check_unique(nums):
    """Solution to exercise C-1.15.

    Return True when all numbers in the sequence are distinct: a set
    collapses duplicates, so equal sizes mean no duplicates existed.
    """
    distinct = set(nums)
    return len(distinct) == len(nums)
def _build_split_command(input_file, output_audio_file): """ Creates a list for each bit of the command line to run since it is a list the command line is secure and can run with file names that are not formatted correctly. or doesn't need any explicit formatting. Downmixing to single channel audio(mono) without losing any data Parameters ---------- input_file : str input video file name and path output_audio_file : str output audio file name and path Returns --------- _CMD command line to pass to the subprocess Examples ---------- The command created spits the video file into an audio file, The file paths are kept same. The command goes like this `ffmpeg -y -i input.mkv output.wav` '-y' : FFmpeg option for 'yes override'. Override the output file it exists '-i' : FFmpeg option for 'input file'. Input file can be multiple """ return [ 'ffmpeg', '-y', '-i', "'" + str(input_file) + "'", '-ac', '1', "'" + str(output_audio_file) + "'" ]
def decode_extra_distance(bits, dist):
    """Decode extra bits for a DEFLATE match-distance symbol.

    `bits` must provide a `read(n)` method returning an n-bit integer;
    `dist` is the distance symbol (0-29).  Returns the actual distance.
    """
    assert dist <= 29
    if dist >= 4:
        # Bug fix: Python 3 `/` yields a float here, corrupting both the
        # bit-read count and the 2**(extra+1) base; use integer division.
        extra = (dist - 2) // 2
        if extra:
            ebits = bits.read(extra)
            dist = 2 ** (extra + 1) + (dist % 2) * (2 ** extra) + ebits
    dist += 1
    return dist
def sortedfd(fd):
    """Return the items of frequency dict *fd* sorted by descending count."""
    def by_count(item):
        return item[1]
    return sorted(fd.items(), key=by_count, reverse=True)
def df(x, xValues, dValues):
    """Second-order partial-difference derivative estimate at *x*."""
    step = xValues[1] - xValues[0]
    theta = (x - xValues[0]) / step
    slope_term = dValues[1] + 0.5 * (2 * theta - 1) * dValues[2]
    return slope_term / step
def rhosat(phi, sw, rhomin, rhow, rhohc):
    """Bulk density of partially saturated rock.

    Sum of the grain, brine and hydrocarbon contributions weighted by
    porosity *phi* and water saturation *sw*.
    """
    grain_term = rhomin * (1 - phi)
    brine_term = rhow * sw * phi
    hc_term = rhohc * (1 - sw) * phi
    return grain_term + brine_term + hc_term
def tile_axes(n):
    """
    Determine number of columns by finding the next greatest square, then
    determine the number of rows needed to fit *n* tiles.
    """
    # stdlib math replaces the original numpy import: scalar ceil/sqrt
    # don't need an array library.
    from math import ceil, sqrt
    cols = int(ceil(sqrt(n)))
    rows = int(ceil(n / float(cols)))
    return cols, rows
def getPointerAddress(data):
    """
    getPointerAddress(data): Return the 14-bit address encoded in a
    two-byte pointer (top two bits of the first byte are masked off).
    """
    high = int(data[0]) & 0b00111111
    low = int(data[1]) & 0b11111111
    # high and low occupy disjoint bit ranges, so | equals the original +.
    return (high << 8) | low
def _not_exhausted(last_fetched): """Check if the last fetched tasks were the last available.""" return len(last_fetched) != 0
def remove_stopwords(text, sw):
    """Remove words that do not add to the meaning; simplifies sentences.

    :param text: iterable of word tokens
    :param sw: iterable of stopwords to drop
    :return: list of the words in *text* that are not stopwords
    """
    # Hoist the stopwords into a set once: O(1) membership per word
    # instead of scanning the stopword list for every token.
    stopword_set = set(sw)
    return [word for word in text if word not in stopword_set]
def get_url(keyword):
    """Gets the Google image search URL with the given keyword.

    :param keyword: (str) search phrase; spaces become %20, commas are dropped.
    :return: (str) URL to Google Images with the keyword correctly formatted.
    """
    sanitized = keyword.replace(' ', '%20').replace(',', '')
    return 'https://google.com/search?q=' + sanitized + '&tbm=isch'
def make_histogram(s):
    """Make a map from letters to the number of times they appear in s.

    s: string
    Returns: map from letter to frequency
    """
    hist = {}
    for ch in s:
        if ch in hist:
            hist[ch] += 1
        else:
            hist[ch] = 1
    return hist
def check_range_modulo_zero(j, number):
    """
    If j mod number is 0, scale j by 1.5*number so it is no longer
    mutually simple with number; otherwise (or when number is 0) return
    j unchanged.
    """
    if number and j % number == 0:
        j = j * (1.5 * number)
    return j
def create_address_argument(ports):
    """
    Build a double-quoted, comma-separated loopback address list.

    >>> create_address_argument([8080, 8081])
    '"127.0.0.1:8080,127.0.0.1:8081"'
    """
    # str.join replaces the manual first-element bookkeeping and the
    # quadratic string concatenation of the original loop.
    hosts = ",".join("127.0.0.1:{}".format(port) for port in ports)
    return '"' + hosts + '"'
def get_processed_job_sequence(procsd_jb_name, all_jbs_in_wrkflw):
    """
    Recursively walk the workflow to produce an ordered list of ancestor
    jobs, ending with *procsd_jb_name* itself.
    """
    sequence = []
    # Locate this job's {name: config} entry in the workflow list.
    job_dict = next(
        (jd for jd in all_jbs_in_wrkflw
         if isinstance(jd, dict) and procsd_jb_name == list(jd)[0]),
        None,
    )
    if job_dict:
        # Depth-first: each parent's full ancestry comes before this job.
        for parent in job_dict[procsd_jb_name].get('requires', []):
            sequence += get_processed_job_sequence(parent, all_jbs_in_wrkflw)
    return sequence + [procsd_jb_name]
def folder_path_in_path_variable(target_path, path_list):
    """True if *target_path* occurs in *path_list*, with or without a
    trailing backslash."""
    candidates = (target_path, target_path + '\\')
    return any(candidate in path_list for candidate in candidates)
def probability(data: dict) -> dict:
    """
    Function to calculate P(omega): each key's share of the total
    element count.

    :param dict data: dict mapping keys to sized collections
    :return: dict of key -> probability
    """
    counts = {key: len(items) for key, items in data.items()}
    total = sum(counts.values())
    return {key: count / total for key, count in counts.items()}
def convert_time_string_to_wolframalpha_query_url(time_string: str) -> str:
    """
    Convert a time string to a WolframAlpha query URL.

    Returns:
    - The converted URL that links to the WolframAlpha query results
    """
    query = time_string.replace(" ", "+")
    return "https://www.wolframalpha.com/input/?i=" + query
def dist(s1, s2):
    """Return the Hamming-style distance between two sequences.

    Counts positions that differ over the shared prefix, plus one for
    each extra element of the longer sequence.  (The original docstring
    claimed equal lengths only, yet the code handled unequal lengths in
    three duplicated branches; this collapses them into one expression.)
    """
    mismatches = sum(el1 != el2 for el1, el2 in zip(s1, s2))
    return mismatches + abs(len(s1) - len(s2))
def InsiderStrip(i: int, j: int, k: int, diff=0):
    """judge whether the loc is in the k-band area

    Args:
        i: loc in y
        j: loc in x
        k: kband size
        diff: Sequence length difference

    Returns:
        True or False
    """
    offset = j - i
    return -k <= offset <= k + diff
def _get_title(properties): """From a list of properties, retrieve the title property and return the value. """ for prop in properties: if prop.name == "title": return prop.value # If there's no title property, return ` string return ""
def _calculate_chi_squared(source_freq, target_prob, source_len): """A measure of the observed frequency of the symbol versus the expected frequency. If the value is 0 then the texts are exactly alike for that symbol. """ expected = source_len * target_prob return (source_freq - expected)**2 / expected
def point_in_rectangle(point, rect_bottom_corner, rect_dimensions):
    """Check if a specific point is within a rectangle (edges inclusive)."""
    px, py = point[0], point[1]
    x0, y0 = rect_bottom_corner[0], rect_bottom_corner[1]
    x1 = x0 + rect_dimensions[0]
    y1 = y0 + rect_dimensions[1]
    return x0 <= px <= x1 and y0 <= py <= y1