def find_index_in_intent_list(intents, contexts):
    """Return the indexes of intents whose element set matches any of the contexts."""
    possible_indexes = set()
    for index, intent in enumerate(intents):
        for context in contexts:
            if set(intent) == set(context):
                possible_indexes.add(index)
    return list(possible_indexes)
def _n_particles(molecules):
    """
    Count the number of meta_molecule nodes in the topology.
    """
    return sum(map(len, molecules))
def zip_lsts(lsts):
    """
    zip a list of lists
    """
    lengths = [len(lst) for lst in lsts]
    assert len(list(set(lengths))) == 1  # assert that the lsts have the same lengths
    zipped_lst = [list(tp) for tp in list(zip(*lsts))]
    return zipped_lst
def real(x):
    """
    the real part of x
    """
    if isinstance(x, complex):
        return x.real
    else:
        return x
def isSimpleEscapeSequence(symbols):
    """Checks that the given symbols form a simple escape sequence.

    Reference: n1570 6.4.4.4"""
    if len(symbols) != 2 or symbols[0] != '\\':
        return False
    return symbols[1] in ("'", '"', '?', '\\', 'a', 'b', 'f', 'n', 'r', 't', 'v')
def get_message_as_text(msg):
    """
    Creates a displayable message in the correct form from a message of the API.
    There will be no translations.

    @param msg: Message returned by the API
    @type msg: LocalizableMessage
    """
    if not msg:
        return None
    return msg.default_message % msg.args
def _intersect(rect1, rect2):
    """
    Return True if the two rectangles rect1 and rect2 intersect.
    """
    x1, y1, w1, h1 = rect1
    x2, y2, w2, h2 = rect2
    return (x1 + w1 > x2 and x2 + w2 > x1) and (y1 + h1 > y2 and y2 + h2 > y1)
def crossproduct(a, b):
    """Return cross product (a X b)

    Parameters
    ----------
    a : 3-element list of floats
        first set of values in cross-product calculation
    b : 3-element list of floats
        second set of values in cross-product calculation

    Returns
    -------
    c : 3-element list of floats
        result of cross-product calculation
    """
    c = [0] * 3
    c[0] = a[1] * b[2] - a[2] * b[1]
    c[1] = a[2] * b[0] - a[0] * b[2]
    c[2] = a[0] * b[1] - a[1] * b[0]
    return c
def get_order(keys, values):
    """Returns order dict from keys and values"""
    return dict(zip(keys, values))
def ranges_to_indices(range_string):
    """Converts a string of ranges to a list of indices"""
    indices = []
    for span in range_string.split('/'):
        if ':' in span:
            start_idx, stop_idx = [int(idx) for idx in span.split(':')]
            stop_idx += 1  # add 1 since the end index is excluded in range()
            indices.extend(list(range(start_idx, stop_idx)))
        else:
            indices.append(int(span))
    return indices
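A quick usage sketch (input assumed for illustration, not from the source): '/'-separated spans expand individually, and ':' ranges are inclusive of the stop index.

# hypothetical input
assert ranges_to_indices('0:2/5/7:8') == [0, 1, 2, 5, 7, 8]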
def rgb(rgb_colors):
    """
    Return a tuple of integers, as used in AWT/Java plots.

    Parameters
    ----------
    rgb_colors : list
        Represents a list with three positions that correspond to the
        percentage of red, green and blue colors.

    Returns
    -------
    tuple
        Represents a tuple of integers that correspond to the color values.

    Examples
    --------
    >>> from pymove.visualization.visualization import rgb
    >>> rgb([0.6, 0.2, 0.2])
    (51, 51, 153)
    """
    blue = rgb_colors[0]
    red = rgb_colors[1]
    green = rgb_colors[2]
    return int(red * 255), int(green * 255), int(blue * 255)
def jd2mjd(jd):
    """
    Converts a Julian Date to a Modified Julian Date

    Parameters
    ----------
    jd : float (any numeric type)
        A Julian Date

    Returns
    -------
    mjd : float
        The Modified Julian Date (MJD) calculated from the input Julian Date

    Examples
    --------
    >>> jd2mjd(2455581.40429)
    55580.90429
    """
    return float(jd - 2400000.5)
def reward(payoffs, action, p):
    """
    >>> reward([2, -1, 1, 0], 1, 1)
    2
    >>> reward([2, -1, 1, 0], 1, 0.5)
    0.5
    >>> reward([2, -1, 1, 0], 0, 0.5)
    1.0
    """
    return action * p * payoffs[0] + action * (1 - p) * payoffs[1] + (1 - action) * payoffs[2]
def get_value_from_labels(json_object: dict, field: dict) -> dict:
    """Given a json_object and a field definition, return the value of the
    first in a list of field names which exists in the json_object."""
    value = {}
    from_labels = field.get('fromLabels', [])
    for label_name in from_labels:
        if label_name in json_object:
            value = json_object[label_name]
            break
    return value
def tofloat(a):
    """
    parse string list/tuple to float list
    """
    b = []
    for i in a:
        b.append(float(i))
    return b
def is_casava_v180_or_later(header_line):
    """Check if the header looks like it is from Illumina software post-casava v1.8

    Parameters
    ----------
    header_line : bytes
        A header line

    Returns
    -------
    bool
        ``True`` if casava v1.8+, otherwise ``False``

    Examples
    --------
    >>> from skbio.util import is_casava_v180_or_later
    >>> print(is_casava_v180_or_later(b'@foo'))
    False
    >>> id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
    >>> print(is_casava_v180_or_later(id_))
    True
    """
    if not header_line.startswith(b'@'):
        raise ValueError("Non-header line passed in!")
    fields = header_line.split(b':')
    return len(fields) == 10 and fields[7] in b'YN'
def get_attachment_name(attachment_name: str) -> str:
    """
    Retrieve the attachment name, or a default string if none is provided.

    Args:
        attachment_name (str): Attachment name to retrieve.

    Returns:
        str: The attachment file name, or 'xsoar_untitled_attachment' by default.
    """
    if attachment_name is None or attachment_name == "":
        return "xsoar_untitled_attachment"
    return attachment_name
def get_nearest_neighbor_coupling_list(width, height, directed=True):
    """Returns a coupling list for a nearest-neighbor (rectilinear grid) architecture.

    Qubits are numbered in row-major order with 0 at the top left and
    (width*height - 1) at the bottom right.

    If directed is True, the coupling list includes both (a, b) and (b, a)
    for each edge.
    """
    coupling_list = []

    def _qubit_number(row, col):
        return row * width + col

    # horizontal edges
    for row in range(height):
        for col in range(width - 1):
            coupling_list.append((_qubit_number(row, col), _qubit_number(row, col + 1)))
            if directed:
                coupling_list.append((_qubit_number(row, col + 1), _qubit_number(row, col)))

    # vertical edges
    for col in range(width):
        for row in range(height - 1):
            coupling_list.append((_qubit_number(row, col), _qubit_number(row + 1, col)))
            if directed:
                coupling_list.append((_qubit_number(row + 1, col), _qubit_number(row, col)))

    return coupling_list
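A small sanity check of the numbering convention on an assumed 2x2 grid (values follow from the row-major layout 0 1 / 2 3, not from the source):

# hypothetical example
assert get_nearest_neighbor_coupling_list(2, 2, directed=False) == [
    (0, 1), (2, 3),  # horizontal edges
    (0, 2), (1, 3),  # vertical edges
]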
def _sort_bam_cmd(i_bam_fn, o_bam_fn, nproc=8, tmp_dir=None):
    """Sort i_bam_fn to o_bam_fn"""
    cmd = 'samtools sort %s -o %s --threads %s' % (i_bam_fn, o_bam_fn, int(nproc))
    return cmd + ' -T {}'.format(tmp_dir) if tmp_dir else cmd
def get_chunks(l, n, max_chunks=None):
    """
    Returns a chunked version of list l with a maximum of n items in each chunk

    :param iterable[T] l: list of items of type T
    :param int n: max size of each chunk
    :param int max_chunks: maximum number of chunks that can be returned.
        Pass None (the default) for unbounded
    :return: list of chunks
    :rtype: list[list[T]]
    """
    if n is None:
        return [l]
    if n <= 0:
        raise ValueError('get_chunks: n must be a positive value. Received {}'.format(n))
    if max_chunks is not None and max_chunks > 0:
        # Grow the chunk size so that no more than max_chunks chunks are produced.
        n = max(n, int(float(len(l)) / float(max_chunks)) + 1)
    return [l[i:i + n] for i in range(0, len(l), n)]
def split_parts(msg):
    """Splits a key=value pair into a tuple."""
    index = msg.find("=")
    return (msg[:index], msg[index + 1:])
def has_bad_allele(all_alleles):
    """Determine if the list of alleles contains an allele that is not one of T, C, G, A"""
    for a in all_alleles:
        if len(a) != 1 or a not in 'TCGA':
            return True
    return False
def repos_join(base, path):
    """Join two repos paths. This generally works for URLs too."""
    if base == '':
        return path
    elif path == '':
        return base
    elif base.endswith('/'):
        return base + path
    else:
        return base + '/' + path
def _composite_device_key(device):
    """! Given a composite device, return a ranking of its specificity
    @return 0 if there is no mount_point; 1 if there is a mount_point but no
    serial_port; 1 + prefix_index if there are both a mount_point and a serial_port.
    """
    rank = 0
    if 'mount_point' in device:
        rank += 1
        if device['serial_port'] is not None:
            rank += device['prefix_index']
    return rank
def gffsorting(gff_file, outgff_file):
    """Workflow target: sort a GFF file with GFFsort.py.

    Returns the (inputs, outputs, options, spec) tuple for the workflow engine.
    """
    inputs = [gff_file]
    outputs = [outgff_file]
    options = {
        'cores': 1,
        'memory': '4g',
        'account': 'NChain',
        'walltime': '01:00:00'
    }
    spec = '''
    python GFFsort.py -o {outfile} {infile}
    '''.format(infile=gff_file, outfile=outgff_file)
    return inputs, outputs, options, spec
def _filter(regex, expr):
    """
    Build a `filter(<regex>, <expr>)` type query expression.

    `regex` is a regex matched on the rule names from `expr`
    `expr` is a query expression of any supported type.
    """
    return "filter('{}', {})".format(regex, expr)
def split_writable_text(encoder, text, encoding):
    """Splits off as many characters from the beginning of text as
    are writable with "encoding". Returns a 2-tuple (writable, rest).
    """
    if not encoding:
        return None, text
    for idx, char in enumerate(text):
        if encoder.can_encode(encoding, char):
            continue
        return text[:idx], text[idx:]
    return text, None
def build_response(data, qparams, func, authorized_datasets=None):
    """Fills the `response` part with the correct format in `results`."""
    if authorized_datasets is None:
        authorized_datasets = []
    response = {
        'results': func(data, qparams, authorized_datasets),
        'info': None,
        # 'resultsHandover': None,  # build_results_handover
        # 'beaconHandover': None,   # build_beacon_handover
    }
    # build_error(qparams)
    return response
def get_labels(res):
    """
    Get the record labels for the given track.

    **Parameters**

    - `res`: list of strings of album record labels,
      e.g. ['Universal/Warner Bros.', 'None']

    **Returns**

    A dictionary of record labels, `{'album_label_{num}': label_name}`.
    """
    if res is not None:
        filter_none = [
            label_string
            for label_string in res
            if label_string is not None and label_string != "None"
        ]
        joined_str = "/".join(filter_none)
        labels = [label.strip() for label in joined_str.split("/") if label]
        return (
            {f"album_label_{i + 1}": label for i, label in enumerate(labels)}
            if labels
            else {"album_label_1": None}
        )
    return {"album_label_1": None}
def process_submit_job_call(call):
    """Process a call to boto3.submit_job to index by task name"""
    try:
        # py 3.6, 3.7
        kw = call[1]
    except KeyError:
        # py >3.7
        kw = call.kwargs
    return {
        kw['jobName']: {
            'dependsOn': [dep['jobId'] for dep in kw['dependsOn']],
            'containerOverrides': kw['containerOverrides']
        }
    }
def yang_filter(train_docs, test_docs, unused_docs):
    """
    Splits the data according to "A Study on Thresholding Strategies for
    Text Categorization" - Yang, Yiming (2001). This is a slightly modified
    version of the "ModApte" split. Main difference (quote from the author):

        "[..] eliminating unlabelled documents and selecting the categories
        which have at least one document in the training set and one in the
        test set. [..]"

    Args:
        train_docs (list[Doc]): annotated articles for training
        test_docs (list[Doc]): annotated articles for testing
        unused_docs (list[Doc]): unused docs according to the "ModApte" split

    Returns:
        train_docs (list[Doc]): annotated articles for training
        test_docs (list[Doc]): annotated articles for testing
        unused_docs (list[Doc]): unused docs according to the "ModApte" AND
            Yang's extra filter
    """
    # Get labels that don't appear in _both_ train and test
    train_labels = set([l for d in train_docs for l in d.labels])
    test_labels = set([l for d in test_docs for l in d.labels])
    bad_labels = train_labels ^ test_labels

    # Remove all bad labels from documents
    for doc in train_docs + test_docs:
        doc.labels = [l for l in doc.labels if l not in bad_labels]

    # Find all docs that have no labels
    bad_docs = [d for d in train_docs + test_docs if not d.labels]
    bad_docs_set = set(bad_docs)

    # Remove them from train/test
    train_docs = [d for d in train_docs if d not in bad_docs_set]
    test_docs = [d for d in test_docs if d not in bad_docs_set]
    unused_docs = unused_docs + bad_docs

    return train_docs, test_docs, unused_docs
def merge_dicts(a, b):
    """Combine two dictionaries, assuming components are arrays.

    Note: mutates and returns `a`; values from `b` are appended to the
    lists already in `a`.
    """
    result = a
    for k, v in b.items():
        if k not in result:
            result[k] = []
        result[k].extend(v)
    return result
def adjacent(cell, size=None):
    """
    Return the coordinates of the cells adjacent to a given cell

    size: [nlines, ncolumns] to avoid returning out-of-bound coordinates
    """
    if size is None:
        return [
            (cell[0] - 1, cell[1]),
            (cell[0], cell[1] - 1),
            (cell[0] + 1, cell[1]),
            (cell[0], cell[1] + 1),
        ]
    return [adj for adj in adjacent(cell)
            if 0 <= adj[0] < size[0] and 0 <= adj[1] < size[1]]
def hitFinder(target, r1, r2):
    """
    The barcode added by Rea's protocol is found on the 5' end of the read.
    That barcode can be found in r1 if the read is short enough, but you'll
    definitely see it in read 2, since that starts from the 5' end. On the
    Illumina platform, r1 will need to be reverse complemented to be correct
    and r2 will not.

    This function finds all reads with the barcode in r2 and returns the
    corresponding r1 reads.

    :param target: Barcode to be found
    :param r1: list of reads produced by fastqparser
    :param r2: list of reads produced by fastqparser
    :return: list of reads
    """
    hits = []
    for i in range(0, len(r2)):
        if target in r2[i]:
            hits.append(r1[i])
    return hits
def for_language(cell, languages=set()):
    """Check if a given paragraph is in an expected language"""
    return cell.get("_lang") in languages
def map_data(field, input_map=False, missing=False):
    """Returns the subject of the condition in map format when more than
    MAX_ARGS_LENGTH arguments are used.
    """
    if input_map:
        if missing:
            return "data.get('%s')" % field
        else:
            return "data['%s']" % field
    return field
def npm_download_url(namespace, name, version, registry='https://registry.npmjs.org'):
    """
    Return an npm package tarball download URL given a namespace, name, version
    and a base registry URL.

    For example:
    >>> expected = 'https://registry.npmjs.org/@invisionag/eslint-config-ivx/-/eslint-config-ivx-0.1.4.tgz'
    >>> assert npm_download_url('@invisionag', 'eslint-config-ivx', '0.1.4') == expected
    >>> expected = 'https://registry.npmjs.org/angular/-/angular-1.6.6.tgz'
    >>> assert npm_download_url('', 'angular', '1.6.6') == expected
    >>> expected = 'https://registry.npmjs.org/angular/-/angular-1.6.6.tgz'
    >>> assert npm_download_url(None, 'angular', '1.6.6') == expected
    """
    if namespace:
        ns_name = f'{namespace}/{name}'
    else:
        ns_name = name
    return f'{registry}/{ns_name}/-/{name}-{version}.tgz'
def check_view_shape(x):
    """Check view function input shape"""
    if not x:
        raise ValueError("The shape variable should not be empty")
    if isinstance(x[0], tuple):
        if len(x) != 1:
            raise ValueError(f"Only one tuple is needed, but got {x}")
        x = x[0]
    return x
def as_bytes(s):
    """
    Convert a unicode string to bytes.

    :param s: Unicode / bytes string
    :return: bytes string
    """
    try:
        s = s.encode("utf-8", 'replace')
    except (AttributeError, UnicodeDecodeError):
        pass
    return s
def twoSum(nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    # Brute force, O(n^2):
    # for i in range(0, len(nums) - 1):
    #     for j in range(i + 1, len(nums)):
    #         if nums[i] + nums[j] == target:
    #             return [i, j]
    # return []

    # Using list.index, still O(n^2) in the worst case:
    # for i in range(len(nums)):
    #     if target - nums[i] in nums:
    #         j = nums.index(target - nums[i])
    #         if i != j:
    #             return [i, nums.index(target - nums[i])]
    # return [0, 0]

    # One-pass hash map, O(n): store the complement of each number and
    # return as soon as a previously stored complement is seen.
    d = {}
    for i in range(len(nums)):
        if nums[i] not in d:
            d[target - nums[i]] = i
        else:
            return [d[nums[i]], i]
    return [-1, -1]
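A minimal check of the one-pass variant (example values assumed, not from the source): the complement of 2 is stored at index 0, so seeing 7 returns immediately.

# hypothetical example
assert twoSum([2, 7, 11, 15], 9) == [0, 1]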
def neon(number) -> bool:
    """
    Takes a number as input and checks whether the given number is a
    Neon Number (a number that equals the sum of the digits of its square).
    """
    square, digit_sum = number ** 2, 0
    while square > 0:
        d = square % 10
        digit_sum += d
        square //= 10
    return digit_sum == number
def sequence_termnum(nth, first_term, common_difference):
    """Usage: find the term number (position) of the value `nth` in an arithmetic sequence."""
    return ((nth - first_term) / common_difference) + 1
def object_name(obj):
    """Get the qualified name of an object.

    This will obtain both the module name from `__module__` and the object
    name from `__qualname__`, and concatenate those with a '.'.

    Examples:

    >>> from math import sin
    >>> object_name(sin)
    'math.sin'
    >>> def f(x):
    ...     return x*x
    ...
    >>> object_name(f)
    '__main__.f'

    To have a qualified name, an object must be defined as a class or
    function in a module (``__main__`` is also a module). A normal
    instantiated object does not have a qualified name, even if it is
    defined and importable from a module. Calling |object_name| on such an
    object will raise :py:exc:`AttributeError`.

    .. |object_name| replace:: :py:func:`object_name`"""
    return obj.__module__ + '.' + obj.__qualname__
def min_ind_of_anch(anchor_info):
    """
    Finds the index of the minimum ID.
    """
    anch_id = list(anchor_info.keys())
    min_id_ind = anch_id.index(min(anch_id))
    return min_id_ind
def getWordPattern(word: str) -> str:
    """Get word pattern

    Returns a string of the pattern form of the given word.

    Args:
        word: String containing word to convert into word pattern.

    Example:
        >>> import pythontutorials.books.CrackingCodes.Ch17.makeWordPatterns as makeWordPatterns
        >>> makeWordPatterns.getWordPattern('DUSTBUSTER')
        '0.1.2.3.4.1.2.3.5.6'

    Returns:
        String containing word pattern.
    """
    word = word.upper()
    nextNum = 0
    letterNums = {}
    wordPattern = []

    for letter in word:
        if letter not in letterNums:
            letterNums[letter] = str(nextNum)
            nextNum += 1
        wordPattern.append(letterNums[letter])

    return '.'.join(wordPattern)
def get_module_by_name(top, name):
    """Search in top["module"] by name"""
    module = None
    for m in top["module"]:
        if m["name"] == name:
            module = m
            break
    return module
def shrink_to_fit(column_sizes, terminal_width):
    """
    If the total size of all columns exceeds the terminal width, then we need
    to shrink the individual column sizes to fit. In most tables, there are
    one or two columns that are much longer than the other columns. We
    therefore tailor the shrinking algorithm based on this principle. The
    algorithm is as follows:

    1) Truncate the longest column until either the columns all fit in the
       terminal width or the size of the truncated column is equal to the
       next longest column.
    2) If the columns fit from truncating, we are done.
    3) If the columns do not fit, shrink the now equally sized columns 1 char
       at a time until the width fits or these columns equal the size of the
       next smallest column.
    4) Repeat steps 2 and 3 successively until a fit is found.

    Note that there is the pathological case that the terminal is smaller
    than a single character for all columns. Ignore this for now. The only
    way to handle it would be to print every column on its own line and
    truncate them to fit. This may be a useful enhancement to make later.
    """
    total_size = sum(column_sizes)
    if total_size <= terminal_width:
        return column_sizes

    # Put the columns in sorted order (largest to smallest)
    sorted_sizes = sorted(column_sizes, reverse=True)

    # Find the index of each sorted column size in the original list and store it.
    # Zero out the value in the original list so that we cover duplicate values,
    # since list.index(val) only finds the first instance.
    indexes = []
    for size in sorted_sizes:
        index = column_sizes.index(size)
        indexes.append(index)
        column_sizes[index] = 0

    # Shrink the sorted columns until they fit the terminal width
    while total_size > terminal_width:
        largest = sorted_sizes[0]
        num_largest_columns = sorted_sizes.count(largest)
        if num_largest_columns != len(sorted_sizes):
            next_largest = sorted_sizes[num_largest_columns]
        else:
            # All columns are the same size, so just shrink each one until they fit
            next_largest = 0

        to_remove = total_size - terminal_width
        gap = largest - next_largest
        if gap * num_largest_columns > to_remove:
            # We can resize in this step and we are done
            # (integer division keeps the widths integral)
            to_remove_per_column = to_remove // num_largest_columns
            remainder = to_remove % num_largest_columns
            for i in range(num_largest_columns):
                sorted_sizes[i] = largest - to_remove_per_column
            for i in range(remainder):
                sorted_sizes[i] = sorted_sizes[i] - 1
        else:
            # We need to remove the max number of chars until we get to the
            # next largest size, then try again
            for i in range(num_largest_columns):
                sorted_sizes[i] = next_largest

        total_size = sum(sorted_sizes)

    # Put the shrunken column sizes in their proper index locations
    for i in range(len(column_sizes)):
        index = indexes[i]
        column_sizes[index] = sorted_sizes[i]

    return column_sizes
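A usage sketch under assumed sizes: with one dominant column, only that column is truncated until the total fits.

# hypothetical example: 30 + 10 + 5 = 45 exceeds a width of 30,
# so the largest column shrinks from 30 to 15
assert shrink_to_fit([30, 10, 5], 30) == [15, 10, 5]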
def grab_routers(nickName):
    """
    Grab a list of routers based on checking for identifying information.
    The identifying information depends on the device naming convention.
    For example, for il-chi-x690-c1 you would check for 'c' in the last
    index after splitting on the '-'.
    """
    last_part = nickName.split('-')[-1]
    # r(outer) or c(ore)
    is_router = 'r' in last_part or 'c' in last_part
    return is_router
def update_moments(n, x, m1, m2):
    """Add the n'th sample, x, to the 1st and 2nd moments, m1 and m2.

    E.g. on the first sample, m1 = x1, m2 = 0;
    on the second sample, use m1, m2 = update_moments(2, x2, m1, m2).

    m1 is the mean, m2 is the second moment.
    sample variance = m2/(n-1), population variance = m2/n

    Parameters:
        n : scalar, sample number (for the first sample, n = 1)
        x : scalar or array_like, sample
        m1 : as x, the previous mean
        m2 : as x, the previous second moment

    Returns:
        m1, m2 : updated mean and second moment

    See: Welford, B.P., "Note on a Method for Calculating Corrected Sums of
    Squares and Products", Technometrics Vol. 4, No. 3 (Aug 1962), pp. 419-420
    https://dx.doi.org/10.2307/1266577
    """
    # use Welford's algorithm to maintain stability
    delta = x - m1  # difference from previous mean
    m1 += delta / n
    m2 += delta * (x - m1)
    return m1, m2
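A short driver loop with assumed data showing Welford's update in use; starting from m1 = m2 = 0 with n = 1 reproduces the first-sample case from the docstring.

# hypothetical data: mean of [2, 4, 6] is 4, sample variance is 4
m1 = m2 = 0.0
for n, x in enumerate([2.0, 4.0, 6.0], start=1):
    m1, m2 = update_moments(n, x, m1, m2)
assert m1 == 4.0 and m2 / (3 - 1) == 4.0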
def __make_worldfile(affine, file_path: str, verbose: bool):
    """
    Note to future maintainers: there is currently only 1 filetype in the
    exclusion list (gtiff). If other filetypes that use header files are
    found, please add them to the exclusion list.

    Inputs:
        affine: affine transform object
        file_path (str): file path of parent image (must include extension)

    Outputs:
        None: results in a world file being created and returns nothing

    Source, per guidelines:
        http://webhelp.esri.com/arcims/9.3/General/topics/author_world_files.htm

    Example world file format:
        20.17541308822119 = A
        0.00000000000000 = D
        0.00000000000000 = B
        -20.17541308822119 = E
        424178.11472601280548 = C
        4313415.90726399607956 = F
    """
    splitfp = file_path.rsplit('.', 1)  # ideally file_path is of form {filename}.{fmt}
    # splitfp should ideally be ['filename', 'fmt']
    if len(splitfp) >= 2:  # checks for the ideal case
        filename, fmt = splitfp[0], splitfp[1]
    else:  # if there is no extension
        filename = splitfp[0]
        fmt = ''

    if filename == '':  # if the file name ends up being an empty string, something went wrong
        if verbose:
            print(f'world file could not be created for: {file_path} | filename of zero length generated')
        return None

    if fmt.lower() in ('gtiff',):  # check exclusion list (files that use a header to store world data)
        if verbose:
            print('No world file generated: file type is in exclusion list')
        return None
    elif len(fmt) >= 3:  # uses 1st & 3rd letters of the extension
        f1, f2 = fmt[0], fmt[2]
        fmt = f'{f1}{f2}w'
        file_path = f'{filename}.{fmt}'
    elif len(fmt) == 0:  # if the image has no extension, 'w' is appended to the file name (per guidelines)
        file_path = f'{filename}w'
    else:  # append 'w' without any other modification
        fmt = f'{fmt}w'
        file_path = f'{filename}.{fmt}'

    data = f'{affine.a}\n{affine.d}\n{affine.b}\n{affine.e}\n{affine.c}\n{affine.f}'
    with open(file_path, 'w') as worldfile:  # create the world file
        worldfile.write(data)
    if verbose:
        print(f'world file created: {file_path}')
def getColPermutations(possible_columns):
    """
    Get all possible combinations given a list of column names

    :return: Given Input = [a,b,c]
        Then, Output = [ [a], [b], [c], [a,b], [a,c], [b,c] ]
    """
    permutations = {col: 1 for col in possible_columns}
    for perm_size in range(len(possible_columns) - 1):
        for permutation in list(permutations.keys()):
            tokens_in_perm = permutation.split(':')
            if len(tokens_in_perm) == perm_size:
                tokens_in_perm.sort()
                for col in possible_columns:
                    if col in tokens_in_perm:
                        continue
                    new_perm = tokens_in_perm + [col]
                    new_perm.sort()
                    new_perm_string = ':'.join(new_perm)
                    permutations[new_perm_string] = 1
    ret = [perm.split(':') for perm in list(permutations.keys())]
    return ret
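An assumed example matching the docstring: for three columns the result is every non-empty proper subset (iteration order may vary, hence the sort).

# hypothetical input
result = sorted(getColPermutations(['a', 'b', 'c']))
assert result == [['a'], ['a', 'b'], ['a', 'c'], ['b'], ['b', 'c'], ['c']]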
def easy_helloname(a):
    """Takes in a string representing a name and returns a new string saying
    hello in a very specific format, e.g., if the name is 'Dave', it should
    return 'Hello, Dave!'"""
    return 'Hello, {}!'.format(a)
def modelPropertiesDictionary(sql_row_list):
    """
    modelPropertiesDictionary(sql_row_list)
    Transforms a row fetched via an SQL request (a list) into a dictionary.
    Affects .get_models_list.py
    """
    properties_dictionary = {
        "id": sql_row_list[0],
        "name": sql_row_list[1],
        "last_deploy_timestamp": sql_row_list[2],
        "active_version": sql_row_list[3],
        "build_id": sql_row_list[4],
    }
    return properties_dictionary
def append_naming_data(scenario_groups, naming_data):
    """
    Append naming data to match bdda naming conventions

    :param scenario_groups:
    :param naming_data:
    :return:
    """
    for scenario_group in scenario_groups:
        for topic in scenario_group.image_topics:
            naming_data[
                scenario_group.get_sequence_index_for_topic(topic)
            ] = scenario_group.get_naming_data_for_topic(topic)
    return naming_data
def isMultipleTagsInput(item):
    """
    Returns True if the argument datatype is not a column or a table,
    allows lists, and has no permitted values. This function is used to
    check whether the argument values have to be delimited by the null
    character (returns True) or not.

    :param item: Table argument.
    """
    return item.get('datatype', 'STRING') in ['STRING', 'DOUBLE', 'INTEGER', 'DRIVER', 'SQLEXPR', 'LONG'] \
        and item.get('allowsLists', False) \
        and not item.get('permittedValues', [])
def f_score(precision, recall, beta=1):
    """calculate the f-score value.

    Args:
        precision (float | torch.Tensor): The precision value.
        recall (float | torch.Tensor): The recall value.
        beta (int): Determines the weight of recall in the combined score.
            Default: 1.

    Returns:
        [float]: The f-score value.
    """
    score = (1 + beta ** 2) * (precision * recall) / (
        (beta ** 2 * precision) + recall)
    return score
def get_duplicate_board(board):
    """
    Makes a duplicate of the board so that we can test moves without
    changing the actual board
    """
    duplicate = []
    for part in board:
        duplicate.append(part)
    return duplicate
def score_merge(ts, upvotes):
    """
    Merge the ts and upvotes
    """
    return ts + 1000 * upvotes
def normalizeBoolean(value):
    """
    Normalizes a boolean.

    * **value** must be an ``int`` with a value of 0 or 1, or a ``bool``.
    * The returned value will be a boolean.
    """
    if isinstance(value, int) and value in (0, 1):
        value = bool(value)

    if not isinstance(value, bool):
        raise ValueError("Boolean values must be True or False, not '%s'." % value)

    return value
def split_list(l, break_pts):
    """returns list l split up into sublists at break point indices"""
    l_0 = len(l)
    sl = []
    # Return a list containing the input list if no breakpoint indices were selected
    if len(break_pts) == 0:
        return [l]
    # Else split the list and return a list of sublists.
    # TODO: ADJUST SO IT'S NOT BP INDICES BUT RATHER LOCATION VALUES?
    else:
        for brk in break_pts:
            delta_l = l_0 - len(l)
            sl.append(l[:brk - delta_l])
            l = l[brk - delta_l:]
        sl.append(l)
    return sl
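A usage sketch with assumed break points: the indices refer to positions in the original list.

# hypothetical example
assert split_list([1, 2, 3, 4, 5], [2, 4]) == [[1, 2], [3, 4], [5]]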
def detect(lang):
    """
    Tries to assume what language a user wants using limited input.
    May be wrong sometimes.

    Arguments:
        lang (str/int) - A language name or language ID.
    """
    # note: this is a generalized thing
    if isinstance(lang, int):
        return lang
    lang = lang.lower()
    if lang == 'c':
        return 4
    elif lang == 'bash':
        return 1
    elif lang in ('cpp', 'c++'):
        return 10
    elif lang in ('csharp', 'c#'):
        return 16
    elif lang in ('clojure', 'clj'):
        return 18
    elif lang == 'crystal':
        return 19
    elif lang == 'elixir':
        return 20
    elif lang == 'erlang':
        return 21
    elif lang == 'go':
        return 22
    elif lang == 'haskell':
        return 23
    elif lang == 'insect':
        return 25
    elif lang == 'java':
        # I would've returned 26, but there are some things that don't work
        # well with OpenJ9, plus Java 8 works with basically anything anyway
        return 27
    elif lang in ('js', 'javascript'):
        return 29
    elif lang == 'ocaml':
        return 31
    elif lang == 'octave':
        return 32
    elif lang == 'pascal':
        return 33
    elif lang in ('python', 'py'):
        return 34
    elif lang == 'ruby':
        return 38
    elif lang == 'rust':
        return 42
def is_loan(x):
    """
    Tests for loan words. These are in cognate set M, M1, M2, or A or L etc.
    """
    if x.startswith("M"):
        return True
    elif x in ("A", "L", "P", "T"):
        return True
    return False
def get_content_type(filename):
    """
    Figure out the content type to use from a filename.

    :param filename: string
    :return: string like 'text/html'
    :type filename: string
    :rtype: string
    """
    types = {
        'ico': 'image/x-icon',
        'png': 'image/png',
        'html': 'text/html',
        'css': 'text/css',
        'js': 'application/javascript',
        'txt': 'text/plain',
        'xml': 'text/xml',
        'svg': 'image/svg+xml',
        'woff': 'application/font-woff',
        'eot': 'application/vnd.ms-fontobject',
        'ttf': 'application/x-font-ttf',
    }
    ext = filename.rsplit('.', 1)[-1]
    if ext not in types:
        return None
    return types[ext]
def get_from_series_expo(column, idx, default):
    """
    Get the values from the expo. This is a helper function only.
    """
    if isinstance(column, list):
        if idx < len(column):
            return column[idx]
        return default
    return column.get(idx, default)
def xpath_lower_case(context, values):
    """Return lower cased values in XPath."""
    return [v.lower() for v in values]
def first_dict(result_list):
    """Return the first dict from a p4 result list."""
    for e in result_list:
        if isinstance(e, dict):
            return e
    return None
def _add_indent(script, indent=2):
    """
    Indent a list of script lines with the specified number of spaces
    """
    if not isinstance(script, list):
        script = [script]
    indent = ' ' * indent
    return [indent + s for s in script]
def evaluate_precision(tp: int, fp: int) -> float:
    r"""Precision, aka Positive Predictive Value (PPV).

    $PPV = \dfrac{TP}{TP + FP}$

    Args:
        tp: True Positives
        fp: False Positives
    """
    try:
        return tp / (tp + fp)
    except ZeroDivisionError:
        return 0.0
def _extract_phospho_islands(phospho, start, end):
    """
    Group overlapping phospho-site/sequence spans into islands.

    phospho = [(3, 4),]; start = [1, ]; end = [5, ]
    """
    last_lo_site = 0
    last_hi_site = 0
    last_seq_start = 0
    last_seq_end = 0
    island_id = 0
    islands = {}

    phospho_sequence_positions = sorted(set(zip(phospho, start, end)))

    for phos_sites, seq_start, seq_end in phospho_sequence_positions:
        lo_site = min(phos_sites)
        hi_site = max(phos_sites)

        if any([(seq_start <= last_hi_site and seq_end >= last_lo_site),
                (last_seq_start <= hi_site and last_seq_end >= lo_site)]):
            if lo_site < last_lo_site:
                last_lo_site = lo_site
                islands[island_id]['lo'] = last_lo_site
            if hi_site > last_hi_site:
                last_hi_site = hi_site
                islands[island_id]['hi'] = last_hi_site
            if seq_start < last_seq_start:
                last_seq_start = seq_start
                islands[island_id]['start'] = last_seq_start
            if seq_end > last_seq_end:
                last_seq_end = seq_end
                islands[island_id]['end'] = last_seq_end
        else:
            # Generate a new island
            island_id += 1
            last_lo_site = lo_site
            last_hi_site = hi_site
            last_seq_start = seq_start
            last_seq_end = seq_end
            islands[island_id] = {
                'lo': last_lo_site,
                'hi': last_hi_site,
                'start': last_seq_start,
                'end': last_seq_end
            }

    return islands
def dont_track(obj):
    """
    Will not track obj. Supports airflow operators, DAGs, and functions.
    Allows excluding from tracking when using dbnd-airflow-auto-tracking,
    track_modules, and track_dag.
    Can be used as a function and as a decorator, as shown in the examples below.

    Usage examples:
        dag = DAG()
        with dag:
            operator = PythonOperator()
            dont_track(operator)

        dont_track(dag)

        @dont_track
        def f():
            pass
    """
    obj._dont_track = True
    return obj
def construct_bap_id(subscription_id, group_name, lb_name, address_pool_name):
    """Build the future BackEndId based on the component names.
    """
    return ('/subscriptions/{}'
            '/resourceGroups/{}'
            '/providers/Microsoft.Network'
            '/loadBalancers/{}'
            '/backendAddressPools/{}').format(
                subscription_id, group_name, lb_name, address_pool_name
            )
def fib_dp_memo(n: int, memo: dict = {0: 0, 1: 1}):
    """Computes the n-th Fibonacci number with memoization.

    Args:
        n: The index of the Fibonacci number to be computed.
        memo: Cache of already computed values (shared across calls by design).

    Returns:
        The n-th number of the Fibonacci sequence.
    """
    if n not in memo:
        memo[n] = fib_dp_memo(n - 1, memo) + fib_dp_memo(n - 2, memo)
    return memo[n]
def make_key_from_coordinates(indexes):
    """
    Converts indexes to strings for hashing.

    :param indexes: the indexes of a hex; nx2, n = number of index pairs
    :return: keys for hashing based on the indexes.
    """
    return [str(int(index[0])) + ',' + str(int(index[1])) for index in indexes]
def _is_contiguous(positions):
    """Given a non-empty list, does it consist of contiguous integers?"""
    previous = positions[0]
    for current in positions[1:]:
        if current != previous + 1:
            return False
        previous = current
    return True
def length(lst):
    """returns the length of a list"""
    if lst == []:
        return 0
    return 1 + length(lst[1:])
def numbers_appear_once(nums):
    """
    Find the two numbers that appear exactly once in an array where every
    other number appears twice.

    :param nums: array
    :return: (num1, num2)
    """
    if not nums:
        return None

    # XOR of all numbers leaves the XOR of the two unique ones
    s = n1 = n2 = 0
    for num in nums:
        s ^= num

    # Find the lowest set bit; it differs between the two unique numbers
    bit = 0
    while s & 1 == 0:
        s = s >> 1
        bit += 1
    div = 1 << bit

    # Partition the numbers by that bit and XOR within each partition
    for num in nums:
        if num & div:
            n1 ^= num
        else:
            n2 ^= num
    return n1, n2
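A quick check on assumed data: 3 and 5 appear once, everything else twice; the partition bit here is bit 1, where 3 (0b011) and 5 (0b101) differ.

# hypothetical input
assert numbers_appear_once([1, 2, 1, 3, 2, 5]) == (3, 5)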
def updateFreq(ssa, ssb):
    """
    Update the frequency dict
    """
    for k, v in ssb.items():
        if k not in ssa:
            ssa[k] = v
        else:
            ssa[k] = ssa[k] + v
    return ssa
def expp_xor_indep(p1, p2):
    """
    Probability of the term t1 XOR t2 being 1 if t1 is 1 with probability p1
    and t2 is 1 with probability p2.

    t1 and t2 have to be independent (no common sub-term).
    Due to associativity it can be computed over multiple terms:
    t1 ^ t2 ^ t3 ^ t4 = (((t1 ^ t2) ^ t3) ^ t4) - zipping.

    XOR:
    a b | r
    ----+---
    1 1 | 0
    1 0 | 1 = p1 * (1-p2)
    0 1 | 1 = (1-p1) * p2
    0 0 | 0

    :param p1:
    :param p2:
    :return:
    """
    return p1 * (1 - p2) + (1 - p1) * p2
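Since the operation is associative, it can be folded over a list of independent term probabilities; note that any term with probability 0.5 pins the result at 0.5. A sketch with assumed values:

from functools import reduce

# hypothetical probabilities
assert expp_xor_indep(0.5, 0.25) == 0.5
assert reduce(expp_xor_indep, [0.5, 0.25, 0.125]) == 0.5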
def update_dict(mapping, key, val):
    """
    Return the list at mapping[key] with val appended (an empty list is used
    if the key is missing). Note: does not store the new list back into the
    mapping when the key was absent.
    """
    try:
        l = mapping[key]
    except KeyError:
        l = []
    l.append(val)
    return l
def abs_max_sort(x):
    """
    >>> abs_max_sort([0, 5, 1, 11])
    11
    >>> abs_max_sort([3, -10, -2])
    -10
    """
    return sorted(x, key=abs)[-1]
def sanitise_conf(conf):
    """
    Set default configuration values

    :param conf: <dict>
    :return:
    """
    conf['username'] = conf.get('username', 'admin')
    conf['password'] = conf.get('password', 'admin')
    conf['protocol'] = conf.get('protocol', 'http')
    conf['host'] = conf.get('host', 'localhost')
    conf['port'] = conf.get('port', 4502)
    return conf
def reduce_dataset_name(key):
    """Divide a dataset name into its base and modifier

    Args:
        key (str): Key to reference a dataset that may or may not have a
            modifier suffix

    Returns:
        tuple of (str, str or None): First string is the base key, the
            second string is the modifier.
    """
    if key.endswith('/missing'):
        return tuple(key.rsplit('/', 1))
    return key, None
def matrix_product(A, B):
    """Compute the product of two matrices A and B.

    If matrix multiplication is impossible, raise an error. Recall that the
    number of columns in the first matrix must equal the number of rows in
    the second matrix.

    >>> I = [
    ... [1, 0, 0, 0],
    ... [0, 1, 0, 0],
    ... [0, 0, 1, 0],
    ... [0, 0, 0, 1]]
    >>> A = [
    ... [4, 3, 2, 1],
    ... [3, 2, 1, 4],
    ... [2, 1, 4, 3],
    ... [1, 4, 3, 2]]
    >>> matrix_product(A, I) == A
    True
    """
    hA, wA, hB, wB = len(A), len(A[0]), len(B), len(B[0])
    assert wA == hB, 'Multiplication impossible: columns of A != rows in B'

    # construct the placeholder matrix
    C = [list(range(wB)) for _ in range(hA)]

    # multiply a specified row in A (y) by a specified column in B (x)
    def multiply(x, y):
        col, row = [row[x] for row in B], A[y]
        C[y][x] = sum(i * j for i, j in zip(col, row))

    # multiply all rows in A by columns in B
    for y in range(hA):
        for x in range(wB):
            multiply(x, y)
    return C
def has_valid_chars(token: str) -> bool:
    """
    Decides whether this token consists of a reasonable character mix.

    :param token: the token to inspect
    :return: True, iff the character mix is considered "reasonable"
    """
    hits = 0  # everything that is not alphanumeric or '-', '.', ' '
    limit = int(len(token) / 10)
    for c in token:
        if not (c.isalnum() or c == '.' or c == '-' or c == ' '):
            hits += 1
            if hits > limit:
                return False
    return True
def p_least_(pl, y0, y1):
    """
    ... select periods within [y0, y1] from a list of periods ...
    """
    pl.sort()
    y0_, y1_ = str(y0), str(y1)

    def _cmp(x0, x1):
        n = min(len(x0), len(x1))
        return x0[:n] <= x1[:n]

    a = lambda x: _cmp(y0_, x) and _cmp(x, y1_)
    b = lambda x, y: a(x) or a(y)
    c = lambda x, y: _cmp(x, y0_) and _cmp(y1_, y)

    return [i for i in pl
            if b(i.split('-')[0], i.split('-')[-1])
            or c(i.split('-')[0], i.split('-')[-1])]
    # a = lambda x: y0 <= int(x) <= y1
    # b = lambda x, y: a(x) or a(y)
    # c = lambda x, y: int(x) <= y0 and int(y) >= y1
    # return [i for i in pl if b(i.split('-')[0][:4], i.split('-')[-1][:4])
    #         or c(i.split('-')[0][:4], i.split('-')[-1][:4])]
def calculate_nmea_checksum(sentence, start='!', separator=','):
    """
    XOR each char with the last, then compare the last 2 characters of the
    sentence with the computed checksum.

    Args:
        sentence (str): the AIS sentence as a string
        start (str): the start of the sentence, default = !
        separator (str): character that separates the parts of the NMEA
            sentence, default = ,

    Returns:
        True: if the calculated checksum equals the checksum at the end
            of the sentence
        False: if the checksums do not match
    """
    sentencelist = sentence.rstrip().split(separator)
    csum = hex(int(sentencelist[len(sentencelist) - 1].split('*')[1], 16))
    start = sentence.find(start) + 1
    end = sentence.find('*')
    data = sentence[start:end]
    chksum = 0
    for char in data:
        chksum ^= ord(char)
    chksum = hex(int(chksum))
    return bool(csum == chksum)
def corrections(sample_list):
    """Applies corrections to misidentifications and removes immatures."""
    for event in sample_list:
        try:
            event.orders('Neuroptera')[0] += event.orders('Trichoptera')[0]
            event.orders('Trichoptera')[0] = 0
            del(event.orders()['Immatures (larvae)'])
        except KeyError:
            del(event.orders()['Immatures (larvae)'])
    return sample_list
def convertCoor(cell):
    """
    Convert a cell tuple from Bennett's gameboard numbering to Rico's numbering
    """
    return (-cell[0] + 1, -cell[1] + 1)
def Edges_Exist_Via(G, p, q):
    """Helper for del_gnfa_states
    ---
    If G has a direct edge p --edgelab--> q, return the list of such edge
    labels. Else return "NOEDGE". We maintain the invariant of at most one
    such edgelab for any p, q in the GNFA.
    """
    edges = [edge
             for ((x, edge), States) in G["Delta"].items()
             if x == p and q in States]
    if len(edges) == 0:
        return "NOEDGE"
    else:
        return edges
def success(path):
    """
    Create success response

    :param path:
    :return:
    """
    return {'status': 'success', 'path': path}
def fibonacci_sum_last_digit(n: int):
    """
    Finds the last digit of the sum of the first n Fibonacci numbers.

    :param n: the number of Fibonacci numbers
    :return: the last digit of the sum

    Example 1: F0 + F1 + F2 + F3 = 0 + 1 + 1 + 2 = 4
    >>> fibonacci_sum_last_digit(3)
    4

    Example 2: Sum = 927,372,692,193,078,999,175
    >>> fibonacci_sum_last_digit(100)
    5
    """
    digit = n % 60  # the Pisano period for the Fibonacci sum is 60
    if digit < 2:
        return digit

    fibonacci = [0, 1]  # holds the last digits of the Fibonacci numbers
    for i in range(2, digit + 1):
        last_digit = (fibonacci[i - 1] + fibonacci[i - 2]) % 10
        fibonacci.append(last_digit)

    return sum(fibonacci) % 10
def get_info_timestep(comp_ts: float):
    """Returns a working time step based on the computation time step.

    Args:
        comp_ts (float): the computation time step

    Returns:
        info_ts (float): a time step to export data and show Framework info
    """
    # create variable for info ts
    if comp_ts <= 0.001:
        info_ts = 0.01
    elif comp_ts <= 0.1:
        info_ts = 0.5
    elif comp_ts <= 10:
        info_ts = 1
    elif comp_ts <= 100:
        info_ts = 10
    elif comp_ts <= 10000:
        info_ts = 100
    else:
        info_ts = 1000

    # return the info time step
    return info_ts
def nx_edge_data_weight(edge_u, edge_v, edge_data):
    """Return custom edge data value to be used as a callback by nx."""
    if edge_data.get("hop"):
        return edge_data["hop"]
    return 1
def TTPN_calc(item1, item2):
    """
    Calculate TPR, TNR, PPV, NPV.

    :param item1: item1 in fractional expression
    :type item1: int
    :param item2: item2 in fractional expression
    :type item2: int
    :return: result as float
    """
    try:
        result = item1 / (item1 + item2)
        return result
    except ZeroDivisionError:
        return "None"
def variance(nums, avg, exhaustive=False):
    """
    Gives variance of input data

    :param nums: contains numbers to use
    :type nums: list
    :param avg: average of nums
    :type avg: float or int
    :param exhaustive: optimizes how data is retrieved in TSP
    :type exhaustive: bool
    :return: variance
    :rtype: float or int
    """
    if not exhaustive:
        total = 0
        for i in nums:
            total += (i - avg) ** 2
        return total / len(nums)
    else:
        t1, t2 = 0, 0
        for i in nums:
            t1 += i ** 2
            t2 += i
        return (t1 - ((t2 ** 2) / len(nums))) / len(nums)
def get_first_letter(word):
    """
    Returns the first character of word in uppercase
    """
    return word[0].upper()
def is_consecutive(lst):
    """Returns True if `lst` contains all numbers from 0 to `len(lst) - 1`
    with no duplicates.
    """
    return sorted(lst) == list(range(len(lst)))
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely composed of
    letters in the hand. Otherwise, returns False.
    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    CHAR_NOT_FOUND_IN_HAND = -1
    word = word.lower()
    if word not in wordList:
        return False
    for char in word:
        charsInHand = hand.get(char, CHAR_NOT_FOUND_IN_HAND)
        if charsInHand == CHAR_NOT_FOUND_IN_HAND or word.count(char) > charsInHand:
            return False
    return True
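A small usage sketch with an assumed hand and word list; 'l' needs a count of 2 in the hand for 'hello' to be valid.

# hypothetical example
hand = {'h': 1, 'e': 1, 'l': 2, 'o': 1}
assert isValidWord('hello', hand, ['hello']) is True
assert isValidWord('hell', hand, ['hello']) is False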
def construct_came_from(environ):
    """
    The URL that the user used when the process was interrupted for
    single-sign-on processing.
    """
    came_from = environ.get("PATH_INFO")
    qstr = environ.get("QUERY_STRING", "")
    if qstr:
        came_from += '?' + qstr
    return came_from
def convert_to_bert_vocab(vocab, items):
    """
    Converts a sequence of [tokens|ids] using the vocab.
    Tokens not in the dictionary are skipped.

    :param vocab: dictionary
    :param items: list of tokens (strings)
    :return: list of converted items
    """
    output = []
    for item in items:
        try:
            output.append(vocab[item])
        except KeyError:
            continue
    return output