content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def _approx_equal(a, b, tolerance): """Check if difference between two numbers is inside tolerance range.""" if abs(a - b) <= tolerance * a: return True else: return False
2f2c2a15b21e96f7d34e669d99acd1f06c333829
664,126
def cycle_list(k, n):
    """
    Return the elements of ``range(n)`` rotated left by ``k`` positions,
    so the list starts with ``k`` (mod ``n``).

    Examples
    ========

    >>> from sympy.crypto.crypto import cycle_list
    >>> cycle_list(3, 10)
    [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
    """
    shift = k % n
    return [(shift + i) % n for i in range(n)]
0be03fd6e1a11c656b632161a3cd302bb88b769b
76,140
def get_lda_topics(lda, top_n=30):
    """Extract topics from a trained tomotopy LDA model
    (adapted from @ecoronado's get_hdp_topics() method).

    ** Inputs **
    lda: obj -> LDAModel trained model
    top_n: int -> top n words in topic based on frequencies

    ** Returns **
    topics: dict -> per topic, an array with top words and associated frequencies
    """
    # Rank topics by how often they were assigned (descending count).
    ranked = sorted(
        enumerate(lda.get_count_by_topics()),
        key=lambda pair: pair[1],
        reverse=True,
    )
    # Insertion order of the dict follows the ranking above.
    return {
        topic_id: [(word, prob)
                   for word, prob in lda.get_topic_words(topic_id, top_n=top_n)]
        for topic_id, _count in ranked
    }
1c2a239a53c75eb795b2c01156621da40b5eb154
193,831
def label_annotations(annotations, inside_label, tokenize=None):
    """
    Create a list of labels for each token in a list of annotations.

    :param annotations: A list of tuples, where the first element is the
        shape of the annotation and the second is its label
    :type annotations: list of (str, str)
    :param inside_label: The label to use for tokens that do not begin an
        annotation
    :param tokenize: The function used to tokenize the annotation shapes.
        Defaults to calling list()
    :type tokenize: callable
    :return: A list of labels for the combined annotations
    :rtype: list of str
    """
    tok = tokenize or list
    labels = []
    for shape, label in annotations:
        for position, _token in enumerate(tok(shape)):
            labels.append(label if position == 0 else inside_label)
    return labels
9662c59616444874cadb070e01289f0e895cbaac
432,169
def calculate_szymkiewicz_simpson_coefficient(set_1, set_2):
    """Calculate the Szymkiewicz-Simpson (overlap) coefficient between two sets.

    :param set set_1: set 1
    :param set set_2: set 2
    :return: similarity of the two sets
    :rtype: float
    """
    overlap = len(set_1 & set_2)
    smaller = min(len(set_1), len(set_2))
    return overlap / smaller
1b06925383ea1fc35e8436921c8cc4e89796cf23
297,843
def read_scoring_matrix(filename):
    """Read a scoring matrix from a file and return it as a nested dict.

    The first line lists the column labels; each subsequent line starts with
    a row label followed by integer scores.

    Bug fixed: the file is now opened with ``with`` so the handle is closed
    even if parsing raises (the original leaked it on error).

    :param filename: path to the matrix file
    :return: dict mapping row label -> {column label -> int score}
    """
    scoring_matrix = {}
    with open(filename, 'r') as matrix_file:
        column_values = matrix_file.readline().split()
        for line in matrix_file:
            scores = line.split()
            row_value = scores.pop(0)
            scoring_matrix[row_value] = {
                column: int(score)
                for column, score in zip(column_values, scores)
            }
    return scoring_matrix
45598efb8fc8d8e50de4e7344430eea7ae5002aa
475,524
def diag(x1, y1, x2, y2):
    """Tests for direct diagonalness."""
    return abs(x1 - x2) == abs(y1 - y2)
d8a56cdbd417aa00a59116eccb6ca8fcfd2b8a44
348,357
import socket
import time


def waitForSSHAvailable(ipAddress, timeout=600):
    """
    Wait until SSH is available, that is its network interface is up.

    Bug fixed: the original created one socket and kept calling
    ``connect()`` on it after failed attempts; reusing a socket after a
    failed connect is unreliable/undefined on most platforms. A fresh
    socket is now created per attempt and always closed.

    :param ipAddress: ipAddress
    :type ipAddress: str
    :param timeout: timeout
    :type timeout: int
    :returns: whether SSH is available
    :rtype: bool
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        sshSocket = socket.socket()
        try:
            sshSocket.settimeout(1)
            sshSocket.connect((ipAddress, 22))
            sshSocket.recv(256)
            return True
        except socket.error:
            time.sleep(1)
        finally:
            sshSocket.close()
    return False
adda2e6b7e7e4ee8cbf57000fd74e7726a2c90f2
160,617
def _format_results(results: dict): """Return formatted dictionary containing term and relevance. Args: results (dict): JSON dictionary of Google Autocomplete results. Returns: suggestions (dict): Formatted dictionary containing term and relevance. """ if results: suggestions = [] for index, value in enumerate(results[1]): suggestion = {'term': value, 'relevance': results[4]['google:suggestrelevance'][index]} suggestions.append(suggestion) return suggestions
8f0a1431bbaeac2f6f5381d032a1a1d6d36a3e15
584,113
def _is_decoy_suffix(psm, suffix='_DECOY'): """Given a PSM dict, return :py:const:`True` if all protein names for the PSM end with `suffix`, and :py:const:`False` otherwise. Parameters ---------- psm : dict A dict, as yielded by :py:func:`read`. suffix : str, optional A suffix used to mark decoy proteins. Default is `'_DECOY'`. Returns ------- out : bool """ return all(prot['label'].endswith(suffix) for prot in psm['protein'])
f578a126a9838d761cb64bd85bffbb223f23684e
155,646
def addcss(value, arg):
    """Add the CSS class ``arg`` to a form field's widget.

    Bug fixed: the original interpolated the *list* of existing classes into
    the format string (``'%s %s' % (list, arg)``), rendering e.g.
    ``"['foo'] bar"`` instead of ``"foo bar"``. The classes are now joined
    properly, and ``split()`` avoids a spurious empty token when no class
    attribute exists.
    """
    css_classes = value.field.widget.attrs.get('class', '').split()
    if arg not in css_classes:
        css_classes.append(arg)
    return value.as_widget(attrs={'class': ' '.join(css_classes)})
9d28ab6e18e9c3fdb6de9044c95ea3c794ffa29d
648,587
def path(index, height):
    """Return the path from a leaf to the root of a binary hash tree.

    Keyword arguments:
    index  -- the leaf's index, in range [0, 2^height - 1]
    height -- the height of the binary hash tree

    Returns:
    The path from the leaf at the given index to the root of the binary
    tree as a list of nodes. Each node is a tuple of (layer, index within
    that layer).
    """
    # At layer L, the ancestor's index is the leaf index shifted right L bits.
    return [(layer, index >> layer) for layer in range(height)]
27d5397209bb914d689ffc193ae995781a5e73ba
460,724
def slugify_iarc_name(obj):
    """
    Converts ratings body's or rating's iarc_name to a slug-like label
    (e.g. "USK" to "usk").
    """
    lowered = obj.iarc_name.lower()
    return lowered.replace(' ', '-')
fb2798f2ce171db7125a1d990815c5fe5a57e761
539,186
def get_yxs_tuples(points, shape):
    """
    Get a matrix of found points.

    :param shape: shape of the template.
    :param points: iterable of (y, x) points.
    :return: a list of [y, x] rows, one per accepted match; a point is
        skipped when it lies within one template size of the previously
        accepted match in both directions.
    """
    height, width = shape
    accepted = []
    last = None
    for y, x in points:
        if last is None or y > last[0] + height or x > last[1] + width:
            last = [y, x]
            accepted.append(last)
    return accepted
8749d3ed57451fc1b2eda74b58c3953f4c55a78a
355,132
def measure_raw(state_vect, shots=1_000_000, random_seed=1):
    """
    Make measurements along the z basis, return raw data

    :param state_vect: qiskit.quantum_info.Statevector object
    :param shots: int, representing # of measurements
    :param random_seed: int, for setting the 'randomness'
    :return: a list of lists containing the measurement results
             shape = (shots, num_qbits)
    """
    state_vect.seed(random_seed)
    samples = state_vect.sample_memory(shots)
    # Each sample is a bitstring; expand it into a row of floats.
    return [[float(bit) for bit in sample] for sample in samples]
b1f35efe8d86330fec52bd11f9f167a2625e1e70
154,533
from typing import Dict


def string_to_dict(input_string: str) -> Dict[str, str]:
    """
    Takes a string of a form `key1=val1,key2=val2` and returns the
    corresponding dict.

    Bug fixed: the original split each pair on every '=' and kept only the
    second piece, silently discarding the rest of a value containing '='.
    ``partition`` splits on the first '=' only, so values may contain '='.
    """
    output_dict: Dict[str, str] = {}
    for pair in input_string.split(","):
        key, _, value = pair.partition("=")
        output_dict[key] = value
    return output_dict
11ae154a0f4203048537c965bace016b5cbd4eeb
562,629
def get_coding_annotation_fasta(seq_record):
    """
    When passed a sequence record object returns an array of FASTA strings
    for each annotation.

    :param seq_record: A Biopython sequence record object.
    :return: A FASTA file string containing all sequences record object CDS
        sequence features.
    """
    fasta = []
    # Each sequence record carries a list of seqFeature objects; only CDS
    # (coding sequence) features are of interest here.
    for feature in seq_record.features:
        if feature.type != "CDS":
            continue
        qualifiers = feature.qualifiers
        # int() strips fuzzy < > characters from the positions.
        start = int(feature.location.start)
        end = int(feature.location.end)
        raw_strand = feature.location.strand
        if raw_strand is None:
            strand = "?"
        elif int(raw_strand) < 0:
            strand = "-"
        elif int(raw_strand) > 0:
            strand = "+"
        else:
            strand = "?"
        location = "[" + str(start) + ":" + str(end) + "](" + strand + ")"
        # qualifiers.get() supplies a default when a qualifier is missing;
        # strip() removes the brackets/quotes of the list representation.
        protein_id = str(qualifiers.get('protein_id', 'no_protein_id')).strip('\'[]')
        if protein_id == 'no_protein_id':
            continue  # Skip features whose protein has no id.
        protein_locus = str(qualifiers.get('locus_tag', 'no_locus_tag')).strip('\'[]')
        gene = str(qualifiers.get('gene', 'no_gene_name')).strip('\'[]')
        product = str(qualifiers.get('product', 'no_product_name')).strip('\'[]')
        translation = str(qualifiers.get('translation', 'no_translation')).strip('\'[]')
        fasta.append(
            ">" + protein_id + " " + gene + "-" + product
            + " (Locus: " + protein_locus + ")"
            + " (Location: " + location + ")" + "\n" + translation + "\n"
        )
    return "".join(fasta)
4fa24279ebb89ea7c61eeae6614c1fa309ffad87
13,936
def verbose_name_plural(obj):
    """
    Returns the plural verbose name for a Django Model instance.
    """
    meta = obj._meta
    return meta.verbose_name_plural
bdaa38bfb308419d244c6e552475778810dc9088
77,105
def merge(b, c):
    """
    Merge two sorted arrays into one sorted array.

    Arguments:
        b, c: Two sorted arrays.
    Return:
        The merged, sorted version of b and c.
    """
    merged = []
    i = j = 0
    # Take the smaller head element until one input is exhausted.
    while i < len(b) and j < len(c):
        if b[i] <= c[j]:
            merged.append(b[i])
            i += 1
        else:
            merged.append(c[j])
            j += 1
    # Append whatever remains of the non-exhausted input.
    merged.extend(b[i:])
    merged.extend(c[j:])
    return merged
b66bd792cf72a3fd8eac314d0c9c1960b94fd158
549,639
def get_valid_moves(board, moves):
    """
    Return the moves that can be played on the board.

    Bug fixed: the original removed items from ``moves`` while iterating
    it, which skips the element following each removed one, so some
    occupied squares could remain in the result.

    @param board: board of the current game
    @param moves: a list of moves
    @return: moves that are valid (``moves`` is also filtered in place,
        preserving the original mutation behaviour)
    """
    valid = [move for move in moves if board[move.line, move.column] == 0]
    moves[:] = valid
    return moves
23ee3a8c51a4677dfa017c8f33196f480f712f07
418,729
def get_dataframe_head(dataframe, number=5):
    """
    Return the first ``number`` rows of ``dataframe`` (5 by default).

    A negative ``number`` returns everything but the last ``|number|``
    rows, mirroring ``DataFrame.head`` semantics.
    """
    return dataframe.head(number)
67df19665338948c52a1805c2492a5a5a1a3e5fe
559,167
def logReSubn(subnExpr, preComment, postComment, errorMsg='') -> str:
    """
    Log the outcome of a re.subn call.

    Prints ``preComment``, then the substitution count followed by
    ``postComment``; prints ``errorMsg`` when zero substitutions were made
    and a message was supplied. Returns the substituted string from
    ``subnExpr``.
    """
    text, count = subnExpr
    print(preComment)
    print(str(count) + ' ' + postComment)
    if count == 0 and errorMsg != '':
        print(errorMsg)
    return text
141c9b6e00b55b1d70d54cb1a32a478c42f55452
446,414
def get_events(data):
    """
    dict -> list

    Return the events from a Meetup request
    """
    events = data["events"]
    return events
314c46771758d86994ecd926d992376d8055f383
460,675
def split_castep(filename):
    """
    Split .castep file into each calculation.

    Running CASTEP several times yields a single .castep file with
    concatenated output. This function splits the outputs into a list of
    each calculation run.

    Bugs fixed: the end sentinel for the final run was ``-1``, which as a
    slice end *excludes* the last line of the file; ``len(lines)`` is used
    instead. The header scan range is also extended by one so a header
    ending exactly at EOF is detected.

    Parameters
    --------
    filename : str
        path to the .castep file

    Returns
    --------
    run_list : list
        list of lines of castep output for each run
    """
    with open(filename, "rt") as f:
        lines = f.readlines()
    castep_header = [
        ' +-------------------------------------------------+\n',
        ' | |\n',
        ' | CCC AA SSS TTTTT EEEEE PPPP |\n',
        ' | C A A S T E P P |\n',
        ' | C AAAA SS T EEE PPPP |\n',
        ' | C A A S T E P |\n',
        ' | CCC A A SSS T EEEEE P |\n',
        ' | |\n',
        ' +-------------------------------------------------+\n'
    ]
    header_positions = [
        l for l in range(len(lines) - 8)
        if lines[l: l + 9] == castep_header
    ]
    # len(lines), not -1, so the final line belongs to the last run.
    header_positions.append(len(lines))
    return [
        lines[l_s:l_e]
        for l_s, l_e in zip(header_positions[:-1], header_positions[1:])
    ]
0b069f198833ec0f4db3d76f001be51b7b84b7b2
328,605
import re


def split_bucket(s3_key):
    """
    Returns the bucket name and the key from an s3 location string.
    """
    pattern = r'(?:s3://)?([^/]+)/(.*)'
    match = re.match(pattern, s3_key, re.IGNORECASE)
    if match:
        return match.group(1), match.group(2)
    return None, s3_key
6b854bdc9d105643a9fa528e6fefd19672451e63
1,261
import math


def trackDistance(mPoint1, mPoint2):
    """
    Return the triangulated distance between two tracking locations
    """
    (x1, y1), (x2, y2) = mPoint1, mPoint2
    return abs(math.hypot(x2 - x1, y2 - y1))
177b21b3d31cac918dc694759603d759c41a0fe2
496,599
def _create_into_array_list(phrase): """Create load or store into array instructions For example, with the phrase 'astore': [ 'iastore', 'fastore', ... 'aastore' ] """ # noinspection SpellCheckingInspection return [letter + phrase for letter in 'lfdibcsa']
3ae0bfbcd1ef3b6fcb8538ccb02c3a150151458e
661,390
def escape(inp, char_pairs):
    """Escape reserved characters specified in the list of tuples `char_pairs`

    Parameters
    ----------
    inp : str
        Input string

    char_pairs : list
        List of tuples of (character, escape sequence for character)

    Returns
    -------
    str
        Escaped output

    See also
    --------
    unescape_GFF3
    """
    result = inp
    for original, replacement in char_pairs:
        result = result.replace(original, replacement)
    return result
80a00957e3ecc3ce7bf33953a430da82ad3350c5
478,149
from typing import List
from typing import Tuple


def has_valid_order(indexed_files: List[Tuple[int, str]], location: str) -> bool:
    """Verify that sequence numbers are in order without gaps or duplicates.

    Args:
        indexed_files: List of (seqnum, filename) for a list of Flyway files.
        location: Where the list of files came from (for error reporting).

    Returns:
        True if the file list is valid.
    """
    previous = 0
    ok = True
    for seqnum, filename in indexed_files:
        if seqnum == previous:
            print('duplicate Flyway file sequence number found in %s: %s'
                  % (location, filename))
            ok = False
        elif seqnum < previous:
            print('File %s in %s is out of order.' % (filename, location))
            ok = False
        elif seqnum != previous + 1:
            print('Missing Flyway sequence number %d in %s. Next file is %s'
                  % (previous + 1, location, filename))
            ok = False
        previous = seqnum
    return ok
f2243de1dd8a43eac6ce250e0b6552eed4256ba9
426,649
import pickle


def load_from_file(filename):
    """Load a past state from file.

    Returns the unpickled object, or None (after printing a message) when
    the file is empty.

    Bug fixed: the file handle is now closed deterministically via ``with``
    (the original opened the file and never closed it).
    """
    with open(filename, 'rb') as f:
        try:
            return pickle.load(f)
        except EOFError:
            print('Nothing written to file.')
3ccc2938e90bd886a18b583568391b2a975d2a52
649,227
def define_options(enable=None, disable=None):
    """
    Define the options for the subscribed events.

    Valid options:
     - CRfile: file created
     - CRdir: directory created
     - MDfile: file modified
     - MDdir: directory modified
     - MVfile: file moved
     - MVdir: directory moved
     - DLfile: file deleted
     - DLdir: directory deleted
     - all: for disable only. Disables all options.

    By default all options are enabled. If all options are disabled,
    'enable' options are applied. Otherwise the 'disable' options are
    removed from the defaults.

    Fixed: ``None`` defaults replace the original mutable ``[]`` defaults
    (shared-list anti-pattern); passing lists explicitly behaves the same.
    """
    enable = [] if enable is None else enable
    disable = [] if disable is None else disable
    default_options = [
        'CRfile', 'CRdir', 'MDfile', 'MDdir',
        'MVfile', 'MVdir', 'DLfile', 'DLdir'
    ]
    if not enable and not disable:
        return default_options
    if 'all' in disable:
        # Everything disabled: only explicitly enabled (valid) options apply.
        return list(set(enable) & set(default_options))
    return list(set(default_options) - set(disable))
a0b35008bebe70d3ec7b964122e24746ee89d1cc
658,583
def bit_string_to_bitlist(strarray):
    """Convert a string into a list of ordinals to do bit operations on.

    Bug fixed: in Python 3 the original returned a lazy, single-use ``map``
    object, contradicting the function's name and docstring; an actual
    (indexable, reusable) list is returned now.
    """
    return [ord(char) for char in strarray]
78c03b690e3000eea786b2d91b6d0e430209238a
360,926
def read_file(path: str) -> str:
    """Read and return the entire content of a UTF-8 text file.

    Args:
        path (str): path to file.

    Returns:
        str: content of the file.
    """
    with open(path, 'r', encoding='utf8') as handle:
        return handle.read()
7c96bb59505ec19d31b2e0aff534c5aeb3da8ac2
582,254
def check_view_filter_and_group_by_criteria(filter_set, group_by_set):
    """Return a bool for whether a view can be used."""
    # The dashboard does not show any data grouped by OpenShift cluster,
    # node, or project, so there are no views for these criteria.
    no_view_group_bys = {"project", "node"}
    blocked = (group_by_set | filter_set) & no_view_group_bys
    return not blocked
8baeeb827ba092a3b5262f076ba1fa77aaf55b5f
697,335
from pathlib import Path


def get_path_context(filename):
    """
    Takes a filename as input (string).
    Returns path to file. Path is preceded by "file:\\"
    """
    lexicon_dir = Path.cwd() / "negation" / "KB" / "lexicon"
    return "file:\\" + str(lexicon_dir / filename)
166f05a52ec1481c3f6691b41c4047fb325ed2d4
424,367
def _add_param(name, value, result, param_appearance_counters): """Add a parameter to the parse result dictionary. If the parameter name already exists in the dictionary then a counter is appended - e.g. myparam-2, myparam-3 etc. Arguments: name - The name of the parameter to add value - The value of the parameter to add result - The parse result dictionary to add the parameter to. param_appearance_counters - Dictionary that tracks how many times parameter names have already been used. Returns: The unique name used in the dictionary for the param. Will be the name specified in the call to the function unless that name already existed in the dictionary, in which case it will have an incrementing number appended. """ if name not in result: param_appearance_counters[name] = 1 unique_name = name else: # Param name already exists - append a counter param_appearance_counters[name] += 1 unique_name = '{0}-{1}'.format(name, param_appearance_counters[name]) result[unique_name] = value return unique_name
3acca2335db952eefd26d4d6ff17b8e6273709a2
131,637
def gen_waveform_name(ch, cw):
    """
    Return a standard waveform name based on channel and codeword number.

    Note the use of 1-based indexing of the channels. The 'ch' argument is
    0-based, but the naming of the actual waveforms as well as the signal
    outputs of the instruments are 1-based: 'logical' channel 0 maps to
    physical channel 1, and so on.
    """
    physical_channel = ch + 1
    return f'wave_ch{physical_channel}_cw{cw:03}'
bfd8ebea77ae490ed0e2a2ceffd0227e02367239
185,491
def normalize_acl(acl):
    """
    Normalize the grant and/or revoke lists we were given.
    Handle single item, list of items, etc. We want to end up with just a
    list of IDs.

    Used by Project object to handle modify_access function.

    :param acl: a list of Person objects or just a Person object
    :return: a list of integers representing the IDs of the Person objects
    """
    try:
        return [int(entry) for entry in acl]
    except TypeError:
        # A single, non-iterable Person was supplied instead of a list.
        return [int(acl)]
6a84d093967d4bbc03dd93e729a85b638fb059bc
370,992
import click


def check(fn, error_message=None):
    """
    Creates callback function which raises click.BadParameter when `fn`
    returns `False` on given input.

    >>> @click.command()
    >>> @click.option('--probability', callback=check(lambda x: 0 <= x <= 1, "--probability must be between 0 and 1 (inclusive)"))
    >>> def f(probability):
    >>>     print('P:', probability)
    """
    def callback(ctx, param, value):
        if not fn(value):
            if error_message is None:
                msg = str(value)
            else:
                msg = '{}. Value: {}'.format(error_message, value)
            raise click.BadParameter(msg)
        return value
    return callback
8519e6c29a1cb2843260570a8888b551d8c4987c
36,676
import torch


def directional_derivatives(lin_op, directions, device):
    """``lin_op`` represents a curvature matrix (either ``GGNLinearOperator``
    or ``HessianLinearOperator`` from ``vivit.hessianfree``). ``directions``
    is a ``D x nof_directions`` matrix with the directions stored
    column-wise. For every direction ``d``, compute
    ``d.T @ lin_op_matrix @ d``.
    """
    num_dirs = directions.shape[1]
    derivs = torch.zeros(num_dirs)
    for idx in range(num_dirs):
        direction = directions[:, idx]
        # lin_op.matvec operates on numpy arrays, so round-trip via numpy.
        direction_np = torch.clone(direction).cpu().numpy()
        mat_vec = torch.from_numpy(lin_op.matvec(direction_np)).to(device)
        derivs[idx] = torch.inner(mat_vec, direction).item()
    return derivs
b91f1f09159d1f09933f007b3b1b4cec1da3513d
668,688
def multi_powmod(bases, exponents, modulus):
    r"""
    Raise all bases to the respective powers mod n:

    :math:`\prod_{i=1}^{len(bases)} base_i^{exponent_i} \pmod{modulus}`

    :param bases: the bases
    :param exponents: the exponents
    :param modulus: the modulus
    :return: the calculated result
    """
    if len(bases) != len(exponents):
        raise ValueError("xs and ys don't have the same size")
    product = 1
    for base, exponent in zip(bases, exponents):
        product = product * pow(base, exponent, modulus) % modulus
    return product
b8b0bcc32e7938996d20044fcd4e0649273d0ee5
17,626
def sets_to_mapping(s):
    """
    Input: {cluster_name: set([cluster_items])} dictionary
    Output: {cluster_item: cluster_name} dictionary
    """
    mapping = {}
    for cluster_name, members in s.items():
        for member in members:
            mapping[member] = cluster_name
    return mapping
b9ee3aa9cfaf47a0bc956ddc0a53ae16d51d269b
586,194
import click


def parse_version_str(version_str):
    """
    Parse a string of 3 positive integers separated by periods (a CIM
    version string) into a list of 3 integers and return that list. Used
    to parse the version value of the DMTF Version qualifier.

    Parameters:
      version_str (:term: str):
        String defining 3 components of a CIM version

    Returns:
        list of 3 integers

    Raises:
        click.ClickException if the version_str is invalid (not integers,
        not separated by ".", not 3 values)
    """
    try:
        components = [int(part) for part in version_str.split('.')]
    except ValueError:
        raise click.ClickException('--since option value invalid. '
                                   'Must contain 3 integer elements: '
                                   'int.int.int". {} received'.
                                   format(version_str))
    if len(components) != 3:
        raise click.ClickException('Version value must contain 3 integer '
                                   'elements (int.int.int). '
                                   '{} received'.format(version_str))
    return components
f896c5f6da20370c06fa3b93f6e76da38b1bc7f3
534,953
def td_print(td):
    """
    Print formatter for a pd.Timedelta: e.g. "1days 2hours ";
    "$\\cdot$" when every component rounds to zero.
    """
    comps = td.round("1s").components
    parts = []
    # components is positional: (days, hours, minutes, seconds, ...).
    for idx, unit in enumerate(["days", "hours", "min", "sec"]):
        if comps[idx] > 0:
            parts.append(f"{comps[idx]}{unit} ")
    formatted = "".join(parts)
    return formatted if formatted else "$\\cdot$"
968ec2fcfcb11b90ce17df5eb713d46f6c0e4441
514,866
import pickle
import dill


def maybe_dill_dumps(o):
    """Pickle using cPickle or the Dill pickler as a fallback.

    The dill pickler is needed for objects of certain custom classes,
    including, for example, ones that contain lambdas.
    """
    try:
        payload = pickle.dumps(o, pickle.HIGHEST_PROTOCOL)
    except Exception:  # pylint: disable=broad-except
        payload = dill.dumps(o)
    return payload
d4fe01daff1ba77a9d02718ad04341960d606e10
519,041
def normalize(path):
    """Replace spaces with underscores, lowercase everything, and collapse
    double slashes (single pass).

    :returns: normalized string
    """
    result = path.replace(" ", "_")
    result = result.lower()
    return result.replace("//", "/")
2e0e9dedd4b33114b30e107f59555d83efaf6dc9
449,104
import math


def color_distance(color1, color2):
    """
    calculate the Euclidean distance between two colors
    """
    squared = sum((c1 - c2) ** 2 for c1, c2 in zip(color1, color2))
    return math.sqrt(squared)
994dccd5e6d3a56b292ba7138b58f133ee6557a3
539,335
def single_l1(value_1, value_2):
    """
    compute L1 metric (absolute difference) between two scalars
    """
    difference = value_1 - value_2
    return abs(difference)
d9f5b91f9318995d91266632c904cc135181bec7
528,384
def appropriate_number(user_input, lower_limit=1, upper_limit=10):
    """Return True if ``user_input`` parses as an int that is at least
    ``lower_limit`` and strictly below ``upper_limit``; False otherwise.
    """
    try:
        number = int(user_input)
    except ValueError:
        return False
    return lower_limit <= number < upper_limit
4e88c2ed242bb592d55e25666d3c8407eab57fb7
285,259
from datetime import datetime


def to_datetime(s):
    """Parse a datetime string from docker logs to a Python datetime object.

    Bug fixed: for timestamps without a fractional-seconds part,
    ``s.find('.')`` returned -1 so ``s[:6]`` truncated the string and
    ``strptime`` raised. A missing fraction is now padded with ``.000000``.

    Args:
        s (str): Docker datetime string

    Returns:
        datetime: Python datetime object
    """
    s = s.split(' ')[0]
    s = s.replace('T', ' ').replace('Z', '')
    dot = s.find('.')
    if dot == -1:
        s += '.000000'
    else:
        # Keep at most 6 fractional digits (the %f limit); docker logs
        # carry nanosecond precision.
        s = s[:dot + 7]
    return datetime.strptime(s, '%Y-%m-%d %H:%M:%S.%f')
1498fd6864bcc2d92ba8cc20a36ebe97e89673e1
439,745
def names(name_instance, fields=None):
    """Return a dict of name variations for the name class.

    Args:
        name_instance: BaseName subclass instance
        fields: list, limit the returned dict to include only elements
            whose keys are in this list.

    Returns:
        dict, example
        {
            'name_for_file': 'File Name',
            'name_for_search': 'Search Name',
            'name_for_url': 'Url Name',
        }
    """
    name_dict = {
        'name_for_file': name_instance.for_file(),
        'name_for_search': name_instance.for_search(),
        'name_for_url': name_instance.for_url(),
    }
    if fields is None:
        return name_dict
    return {key: val for key, val in name_dict.items() if key in fields}
930dce71186bdaec525a25daa867c2498567e3e5
210,463
def unsplit_complex(list_real, list_imag):
    """Combine separate real and imaginary lists into one complex list."""
    pairs = zip(list_real, list_imag)
    return [complex(re_part, im_part) for re_part, im_part in pairs]
4a5fb1fcdd604525abdebf3556b88e973a8b3dbf
498,439
def argsort(seq):
    """
    Argsort a list of strings.

    Parameters:
    -----------
    seq: list of words
        list of words to sort

    Returns:
    --------
    sorted_seq: list of int (indices)
        list of indices (same size as input seq) in sort order
        (eg. seq=['foo', 'bar', 'foo', 'toto'] => out=[1, 0, 2, 3])
    """
    indices = list(range(len(seq)))
    indices.sort(key=lambda i: seq[i])
    return indices
69dc07786bd4ebfe00ab573c2148eb52c9e7b442
599,672
def ParseBool(text):
    """Parse a boolean value.

    Args:
        text: Text to parse.

    Returns:
        Boolean value parsed

    Raises:
        ValueError: If text is not a valid boolean.
    """
    truthy = ('true', 't', '1', 'True')
    falsy = ('false', 'f', '0', 'False')
    if text in truthy:
        return True
    if text in falsy:
        return False
    raise ValueError('Expected "true" or "false".')
75b4aab71b9bfd7772dc609bb45654b94790c7f7
296,607
def firstNN(*args):
    """
    Return the first argument that is not None, or None if all are.

    Example
    -------
    >>> firstNN(None, False, True)
    False
    >>> firstNN(None, 2, True)
    2
    >>> firstNN()
    """
    for arg in args:
        if arg is not None:
            return arg
    return None
f8f0e2d573c46edbbd1458fdba9195d44248d17b
278,568
def add_links(self, value):
    """
    Build a Link header from the schema object's links.

    Parameters
    ----------
    self : lib.schema.riak.objects.Link
        The Link schema object
    value : dict
        The headers of the request to add the links to
    """
    links = getattr(self, 'links', [])
    if links:
        locations = [link._as_dict()['location'] for link in links]
        value['Link'] = ', '.join(locations)
    return value
3d507ce928b227399ca325d5a55cab6b5ae07791
12,777
import requests


def get_jobdesc_config(job):
    """Fetch and validate the job-description config URL for ``job``.

    Downloads ``job_description.json`` to verify it is reachable (raising
    on HTTP errors) and returns its URL.

    NOTE(review): the fetched JSON body is discarded — if callers expect
    the parsed config rather than its URL, this should return ``r.json()``;
    verify against the call sites.
    """
    job_desc_url = '%s/job_description.json' % job['job_url']
    response = requests.get(job_desc_url, verify=False)
    response.raise_for_status()
    return job_desc_url
33d03c0267f8d55a61c25163c586c138e302fee2
676,321
def user_exists(ssh_fn, name):
    """Test if a user exists.

    The ssh_fn is a function that takes a command and runs it on the
    remote system. It requires that ssh_fn raises an exception if the
    command fails.

    :param ssh_fn: a ssh_fn that can run commands on the unit.
    :type ssh_fn: Callable[[str], str]
    :param name: the name of the user to test if exists.
    :type name: str
    :returns: True if the user exists
    :rtype: bool
    """
    pattern = "^{name}:".format(name=name)
    try:
        ssh_fn(["grep", "-c", pattern, "/etc/passwd"])
        return True
    except Exception:
        # grep exits non-zero when no line matches, which ssh_fn surfaces
        # as an exception.
        return False
17d1eceff1017263316ad4e47f2e5c60643204aa
297,385
from typing import Iterable
from typing import Any


def only_one(iterable: Iterable[Any]) -> Any:
    """Consume and return the first and only item of an iterable.

    Arguments:
        iterable: iterator or collection expected to contain only one item

    Returns:
        single item of iterable

    Raises:
        ValueError: the iterable is empty or has more than one item

    Examples:
        >>> only_one(['test'])
        'test'
    """
    items = iter(iterable)
    sentinel = object()
    first = next(items, sentinel)
    if first is sentinel:
        raise ValueError('expected 1 item, got empty iterable')
    if next(items, sentinel) is not sentinel:
        raise ValueError('expected only 1 item in iterable')
    return first
9f660922689f11d1e374d7fc88af9a19a8ba9b4c
470,807
def get_maintainers_account_creation_date(pypi_profiles):
    """Retrieve dates that maintainers' PyPI accounts were created."""
    dates = []
    for soup in pypi_profiles["maintainers_data"]:
        # 'time' elements appear in several places on a PyPI profile page,
        # so restrict the search to the author-metadata divs.
        metadata_divs = soup.findAll(
            "div", {"class": "author-profile__metadiv"}
        )
        for div in metadata_divs:
            time_elem = div.find("time")
            if time_elem:
                # contents is a list; strip() the first (string) entry,
                # since strip() is not valid on the list itself.
                dates.append(time_elem.contents[0].strip())
    return dates
06de38d26db877ff0c3ad8bcc197744cac31f3de
385,410
import re


def cqlstr(string):
    """Makes a string safe to use in Cassandra CQL commands

    Args:
        string: The string to use in CQL

    Returns:
        str: A safe string replacement
    """
    unsafe_chars = re.compile('[-:.]')
    return unsafe_chars.sub('_', string)
e505d855e374109edee0a1d5e76ff0cdeab64581
25,529
from typing import Sequence
from typing import Hashable


def all_items_present(sequence: Sequence[Hashable], values: Sequence[Hashable]) -> bool:
    """
    Check whether all provided `values` are present at any index in the
    provided `sequence`.

    Arguments:
        sequence: An iterable of Hashable values to check for values in.
        values: An iterable of Hashable values for whose presence to
            check `sequence` for.

    Returns:
        `True` if all `values` are present somewhere in `sequence`,
        else `False`.
    """
    for item in values:
        if item not in sequence:
            return False
    return True
f43a881159ccf147d3bc22cfeb261620fff67d7a
703,955
import re


def parse_time(time_string) -> int:
    """
    Parse a time stamp in seconds (default) or milliseconds (with "ms" unit)

    The "s" unit is optional and implied if left out.

    Args:
        time_string(str): timestamp, e.g., "0.23s", "5.234" (implied s),
            "1234 ms" - must be a number followed by "s", "ms" or nothing.

    Returns:
        int: time represented by time_string in milliseconds
    """
    pattern = re.compile(
        r"""
        \s*           # ignore leading spaces
        ([0-9.]+)     # Numerical part
        \s*           # optional spaces
        (
            (s|ms)    # optional units: s (seconds) or ms (milliseconds)
            \s*       # ignore trailing spaces
        )?
        """,
        re.VERBOSE,
    )
    match = pattern.fullmatch(time_string)
    if not match:
        raise ValueError(
            f'cannot convert "{time_string}" to a time in seconds or milliseconds'
        )
    number, unit = match[1], match[3]
    if unit == "ms":
        return int(number)
    return int(1000 * float(number))
b9d3d1cc4f388def47b3dd620a656f60f7c50929
684,384
def is_unweighted(matrix):
    """Heuristically decide whether a matrix looks like an unweighted
    adjacency matrix: every nonzero entry equals 1 and the maximum is 1.
    """
    nonzero_entries = matrix[matrix.nonzero()]
    return nonzero_entries.min() == 1 and matrix.max() == 1
37fe5491aa13da8bf48236fa351ba5b58c74ab21
618,923
def defaultkeys(defaults, keywords):
    """
    Fill ``keywords`` in place with values from ``defaults``.

    A default is applied when the key is missing from ``keywords`` or its
    value is None; explicit non-None values are kept.

    Parameters
    ----------
    defaults, keywords : dict
        Dictionary of default and replacing keywords.

    Returns
    -------
    keywords : dict
        The same (mutated) ``keywords`` dictionary.
    """
    for key, default in defaults.items():
        if key not in keywords or keywords[key] is None:
            keywords[key] = default
    return keywords
96094a2d58f3fb60b430fa731d985a0e955d4e48
443,405
def avoid_wrapper(source_host, host):
    """
    Check if this combination of source and destination host should avoid
    running virt-v2v.

    A falsy ``source_host`` is returned unchanged (mirroring the
    short-circuit behaviour of ``and``); otherwise the source host is
    asked directly.
    """
    if not source_host:
        return source_host
    return source_host.avoid_wrapper(host)
1256743e505e3fe8dea81dc2cb92fac48033127f
551,659
def header_exists(header_name, headers):
    """Return True if ``headers`` — (name, value) pairs — contain
    ``header_name``, compared case-insensitively."""
    target = header_name.lower()
    return any(name.lower() == target for name, _ in headers)
bf0dd31a9febbed20efb7e8078bc085b5fe4f792
646,576
import json


def get_titles_from_bill_meta(bill_meta_path: str):
    """
    Parse the bill meta JSON file and return its list of titles.
    """
    with open(bill_meta_path, 'r') as meta_file:
        return json.load(meta_file)['titles']
c32ddf642cb54f04a1299752496750e969d18bf1
423,345
import torch


def convert_tensors_recursively_to(val, *args, **kwargs):
    """Recursively apply ``.to(*args, **kwargs)`` to every tensor in ``val``.

    Tensors are converted; tuples and lists are rebuilt (preserving their
    type) with each element converted; dicts are rebuilt with their values
    converted and keys unchanged. Any other value is returned as-is.
    """
    if isinstance(val, torch.Tensor):
        return val.to(*args, **kwargs)
    if isinstance(val, (tuple, list)):
        return type(val)(convert_tensors_recursively_to(item, *args, **kwargs)
                         for item in val)
    if isinstance(val, dict):
        # Generalization: also descend into mappings. The mapping type must
        # be constructible from an iterable of (key, value) pairs.
        return type(val)((key, convert_tensors_recursively_to(item, *args, **kwargs))
                         for key, item in val.items())
    return val
76525788cf89732ef2eed2822ac98e2be0fe14a6
49,709
def rotation_angles(step=5, limit=45):
    """
    Build a list of candidate rotation angles.

    Starts with [0, -90, 90]; then for each offset in
    range(step, limit, step) — note ``limit`` itself is excluded —
    appends base ± offset for each base in (0, -90, 90), negative
    sign first.
    """
    angles = [0, -90, 90]
    for offset in range(step, limit, step):
        angles.extend(base + sign * offset
                      for sign in (-1, 1)
                      for base in (0, -90, 90))
    return angles
b687c347e170f6d7ac27e7c4622ca779fca9f088
308,920
def fill_nan(df, list):
    """
    Replace NaN values in the given columns with the column mean.

    Parameters
    ----------
    df : dataframe
        dataframe whose NaN values are filled (modified in place)
    list : list
        columns to process (NOTE: parameter name shadows the builtin
        ``list``; kept for interface compatibility)

    Returns
    -------
    df : dataframe
        the same dataframe with NaNs in the given columns replaced
    """
    for column in list:
        mean_value = df[column].dropna().mean()
        df[column] = df[column].fillna(mean_value)
    return df
5f07eb835c4e20b08e98d319b4749694ad81474f
655,518
def var_data(f, N):
    """Variance of the MLE of beta for a quantitative trait, assuming
    var(y) = 1.

    Args:
        f: minor allele freq
        N: sample size

    Returns:
        variance of MLE beta, i.e. 1 / (2 * N * f * (1 - f))
    """
    denominator = 2 * N * f * (1 - f)
    return 1 / denominator
4aedc7fb5c4dcfa18c373785404666d6fd645d2e
366,884
def find_and_set_gpus(gpu_mon, force_GPU, num_GPUs):
    """
    Given a MultiPlanarUnet GPUMonitor object and the parsed command-line
    arguments, either looks for free GPUs and sets them, or sets a forced
    GPU visibility.

    Specifically, if args.force_GPU is set, set the visibility accordingly,
    count the number of GPUs set and return this number. If not, use
    args.num_GPUs currently available GPUs and return args.num_GPUs

    Args:
        gpu_mon: (GPUMonitor) Initialized GPUMonitor
        force_GPU: (string) A CUDA_VISIBLE_DEVICES type string to be set
        num_GPUs: (int) Number of free/available GPUs to automatically select
                        using 'gpu_mon' when 'force_GPU' is not set.

    Returns:
        (int) The actual number of GPUs now visible
    """
    if not force_GPU:
        # Block until 'num_GPUs' free GPUs are available, claim them, and
        # stop the monitor afterwards.
        gpu_mon.await_and_set_free_GPU(N=num_GPUs, stop_after=True)
    else:
        # NOTE(review): plain attribute assignment — presumably 'set_GPUs'
        # is a property setter on GPUMonitor that applies the visibility
        # string; confirm this is not a silent no-op attribute write.
        gpu_mon.set_GPUs = force_GPU
    # Number of GPUs visible after either path above.
    return gpu_mon.num_currently_visible
6906fb279289de08b9dca8e46b656cdb0833b4b9
325,723
def tensor2npy(x):
    """Convert a 3-D tensor to a numpy array, moving axis 0 last.

    The tensor is detached and copied to CPU first; the transpose turns
    an assumed (C, H, W) layout into (H, W, C) — TODO confirm callers
    pass channels-first tensors.
    """
    array = x.detach().cpu().numpy()
    return array.transpose(1, 2, 0)
8607b113a78c90deb9932d4236b44ff1617d5edb
558,466
import re def remove_comments(text: str) -> str: """Removes comments from given text. Args: text: str. The text from which comments should be removed. Returns: str. Text with all its comments removed. """ return re.sub(r' //.*\n', r'', text)
68a9e0b4b823f17e24e4c3e393b56b7d410aec17
302,781
def make_safe_filename(name):
    """Replace characters that Windows forbids in file names with spaces."""
    illegal = '/\\:*?"<>|'  # Windows illegal file-name chars
    return name.translate(str.maketrans(illegal, ' ' * len(illegal)))
776da54097ce3fcb0f4aae23f80ac0932654d800
670,286
import logging


def get_logger(logger_name, propagate=True):
    """
    Return a logger with a NullHandler attached.

    The NullHandler guarantees nothing is dumped to the terminal when the
    library user never configures logging (or doesn't care to).

    Args:
        logger_name (str): the logger instance name (usually the module
            name with __name__)
        propagate (bool): whether to propagate the messages up to ancestor
            loggers

    Returns:
        logging.Logger: Logger instance

    Raises:
        None
    """
    # logging.getLogger creates the instance on first request
    log = logging.getLogger(logger_name)
    log.propagate = propagate
    log.addHandler(logging.NullHandler())
    return log
74bf3497b7fb062182bdd56b9e6ffb8d550512dd
692,358
def filter_hadgem_stream2(queryset):
    """
    From a queryset of pdata_app.DataRequest objects return just the data
    requests that are required in the PRIMAVERA Stream 2 simulations for
    HadGEM data.

    :param django.db.models.query.QuerySet queryset:
    :returns: Just the Stream 2 data requests
    :rtype: django.db.models.query.QuerySet
    """
    # Monthly and daily requests, except the CFday table (only its 'ps'
    # variable is wanted; it is OR-ed back in below).
    selected = queryset.filter(
        variable_request__frequency__in=['mon', 'day']
    ).exclude(
        variable_request__table_name='CFday'
    ).distinct()
    # Surface pressure from CFday.
    selected = selected | queryset.filter(
        variable_request__table_name='CFday',
        variable_request__cmor_name='ps'
    ).distinct()
    # Six-hourly wind gust maximum.
    selected = selected | queryset.filter(
        variable_request__table_name='Prim6hr',
        variable_request__cmor_name='wsgmax'
    ).distinct()
    # Selected three-hourly variables.
    selected = selected | queryset.filter(
        variable_request__table_name__in=['3hr', 'E3hr', 'E3hrPt', 'Prim3hr',
                                          'Prim3hrPt'],
        variable_request__cmor_name__in=['rsdsdiff', 'rsds', 'tas', 'uas',
                                         'vas', 'ua50m', 'va50m', 'ua100m',
                                         'va100m', 'ua7h', 'va7h', 'sfcWind',
                                         'sfcWindmax', 'pr', 'psl', 'zg7h']
    ).distinct()
    return selected
2f431befbf1c6a05ac953afc061a5b29566215e8
452,775
def longest_word_len(filename):
    """Return the length of the longest whitespace-separated word in a file.

    Args:
        filename: path of the text file to scan.

    Returns:
        int: length of the longest word, or 0 for an empty or
        whitespace-only file (the original raised ValueError from max()
        in that case).
    """
    with open(filename, 'r') as file_p:
        words = file_p.read().split()
    # default=0 makes the empty-file case well defined instead of raising.
    return max((len(word) for word in words), default=0)
94006ecbe986dfc4bbf0db8e6250f9e6bfde5e7f
440,560
def get_ax_location(legend_style):
    """
    Build a matplotlib-style legend location string from a legend style
    dict, or return the default 'best' when either axis is unspecified.
    """
    horizontal = legend_style.get('align', None)
    vertical = legend_style.get('verticalAlign', None)
    # Fall back to automatic placement when either component is missing.
    if not (horizontal and vertical):
        return 'best'
    # matplotlib says upper/lower where the input says top/bottom.
    vertical = vertical.replace('top', 'upper').replace('bottom', 'lower')
    return f'{vertical} {horizontal}'
fc9f7096bc59c96237cb13c17799f7138271355e
485,733
def nchw2nlc2nchw(module, x, contiguous=False, **kwargs):
    """Run ``module`` (which expects [N, L, C] input) on an [N, C, H, W]
    tensor, converting the layout on the way in and back on the way out.

    Args:
        module (Callable): A callable taking a tensor of shape [N, L, C].
        x (Tensor): The input tensor of shape [N, C, H, W].
        contiguous (Bool): Whether to make the tensor contiguous after
            each shape transform.

    Returns:
        Tensor: The output tensor of shape [N, C, H, W].

    Example:
        >>> import torch
        >>> import torch.nn as nn
        >>> norm = nn.LayerNorm(4)
        >>> feature_map = torch.rand(4, 4, 5, 5)
        >>> output = nchw2nlc2nchw(norm, feature_map)
    """
    batch, channels, height, width = x.shape
    # [N, C, H, W] -> [N, H*W, C]
    tokens = x.flatten(2).transpose(1, 2)
    if contiguous:
        tokens = tokens.contiguous()
        result = module(tokens, **kwargs)
        result = result.transpose(1, 2).reshape(batch, channels, height, width)
        return result.contiguous()
    result = module(tokens, **kwargs)
    return result.transpose(1, 2).reshape(batch, channels, height, width)
5cf89b8519ca4f7997efbf4ef532e8188f560140
368,230
def mojang_time_format(dt):
    """
    Format a datetime in Mojang's timestamp format (always "+0000").

    :param datetime dt: datetime

    >>> mojang_time_format(datetime(2010, 4, 25, 23, 43, 20))
    '2010-04-25 23:43:20 +0000'
    """
    mojang_pattern = '%Y-%m-%d %H:%M:%S +0000'
    return dt.strftime(mojang_pattern)
3f97639754818a005d414c0f9e0e76e1b18cc03d
545,557
def compare_complete_match(span1, span2):
    """
    Compare two spans and return True if their lowercased lemmas
    (stop words excluded) are equal.

    Parameters
    ----------
    span1: Span
        First span to compare.
    span2: Span
        Second span to compare.

    Returns
    -------
    bool
        True if spans match.
    """
    def normalized(span):
        # lowercase lemmas of non-stop tokens, joined into one string
        return " ".join(tok.lemma_.lower() for tok in span if not tok.is_stop)

    if normalized(span1) == normalized(span2):
        return True
    return False
84c31a2d621f4b17f80ec9f37d272b4967d23505
220,663
def _format_recipients(recipients): """Take a list of recipient emails and format it nicely for emailing.""" return ", ".join(recipients)
6f0d7c0c4d9637d204b9a70dbb996c2b65fdbed9
504,925
import torch
from typing import OrderedDict


def load_model(fname, model):
    """
    Load a saved checkpoint's parameter dictionary into an initialized model.

    Any leading ``module.`` prefix (presumably added by a DataParallel-style
    wrapper — confirm with the training code) is stripped from parameter
    names before loading.

    Parameters
    ----------
    fname : :py:class:`str`
        Path to saved model
    model : :py:class:`torch.nn.Module`
        Initialized network architecture

    Returns
    -------
    model : :py:class:`torch.nn.Module`
        Up-to-date neural network model
    """
    # map_location keeps tensors on their serialized (CPU) storage.
    checkpoint = torch.load(fname, map_location=lambda storage, loc: storage)
    renamed = OrderedDict()
    prefix = 'module.'
    for name, tensor in checkpoint['model'].items():
        if name.startswith(prefix):
            name = name[len(prefix):]
        renamed[name] = tensor
    checkpoint['model'] = renamed
    model.load_state_dict(checkpoint['model'])
    return model
561a0d2557aa92321cd8a3603cfc9d0f3cbb4b29
664,391
from typing import Tuple


def ntp2parts(ntp: int) -> Tuple[int, int]:
    """Split an NTP timestamp into its (seconds, fraction) parts."""
    seconds = ntp >> 32
    fraction = ntp & 0xFFFFFFFF
    return seconds, fraction
729b3f9ce912e1be54c0c5bafd9c5577a78091b9
691,877
from typing import Union
from pathlib import Path
import json


def parse_metadata_file(metadata_path: Union[str, Path]) -> dict:
    """
    Load the JSON file describing a compendium's samples.

    Arguments
    ---------
    metadata_path: The file containing metadata for all samples in the
        compendium

    Returns
    -------
    metadata: The json object stored at metadata_path
    """
    with open(metadata_path) as metadata_file:
        return json.load(metadata_file)
c160948ea9f47476f34c71157b3f74ddca106a92
273,157
def get_data(file_name):
    """Read a whitespace-delimited text file into a flat list of tokens."""
    with open(file_name, 'r') as handle:
        # str.split() with no argument splits on any run of whitespace,
        # including newlines, so one read covers the whole file.
        return handle.read().split()
4a5c0caf5ea7c6c86fee8581b73e23e7f4dbb2c4
353,882
import re
import click


def prepare_url(url):
    """Validate and prepare the URL of the test.

    Extracts the instance base URL and the numeric test id, echoes what
    was detected, and returns the normalized '<instance>/tests/<id>' URL.
    Exits with status 1 if the URL cannot be parsed.
    """
    parsed = re.search(r'(http[s]*://.*)/tests/([0-9]*)[#|/]*.*', url)
    if not parsed:
        click.echo('The URL cannot be detected!')
        exit(1)
    # BUG FIX: group(1) is the instance base URL and group(2) the test id;
    # the original echoed them in swapped order.
    click.echo('Detected test %s/tests/%s instance'
               % (parsed.group(1), parsed.group(2))
               )
    return parsed.group(1) + '/tests/' + parsed.group(2)
736e217db0eac020e8e7f4075ae32e85809f7bbc
276,550
def complete_out_of_view(to_check_box, im_w, im_h):
    """
    Check whether a bounding box lies entirely outside the image.

    :param to_check_box: bounding box, numpy array or sequence, [x1, y1, x2, y2]
    :param im_w: image width, int
    :param im_h: image height, int
    :return: True if the whole box is out of view, bool
    """
    x1, y1 = to_check_box[0], to_check_box[1]
    x2, y2 = to_check_box[2], to_check_box[3]
    beyond_left = x1 < 0 and x2 < 0
    beyond_top = y1 < 0 and y2 < 0
    beyond_right = x2 > im_w and x1 > im_w
    beyond_bottom = y1 > im_h and y2 > im_h
    return beyond_left or beyond_top or beyond_right or beyond_bottom
671d9a774bcab4b45df7ac3f6f1266f95d7674c1
666,320
def format_dict(dict_: dict) -> str:
    """Render a dict as comma-separated "key=value" pairs.

    >>> d = {"key1": "value1", "key2": "value2"}
    >>> s = format_dict(d)
    >>> assert s == "key1=value1, key2=value2"
    """
    return ", ".join(f"{key}={value}" for key, value in dict_.items())
23203899394a60dd068666b1e08ce61c901aa8b2
241,424
def check_for_contest(cvr, contest_name):
    """
    Check whether a single CVR contains the given contest.

    Parameters:
    -----------
    cvr : object
        a single CVR (must expose a ``votes`` mapping)
    contest_name : string
        name of contest

    Returns:
    --------
    bool
        whether the contest is present in the CVR
    """
    return contest_name in cvr.votes
09ac5fdbfe2c3c5adfa70e435b2a810caa6b66ad
502,904
def get_eta_transmission(district_type):
    """
    Returns efficiency of lhn based on Fraunhofer Umsicht:
    Leitfaden Nahwaerme (p.51)

    Parameters
    ----------
    district_type : string
        type of district ('big', 'small'; anything else counts as medium)

    Returns
    -------
    eta_transmission : float
        efficiency factor for transmission
    """
    # BUG FIX: the original compared strings with 'is', which tests object
    # identity and silently fails for equal but non-interned strings
    # (e.g. values built at runtime); use '==' for value equality.
    if district_type == 'big':
        eta_transmission = 0.93
    elif district_type == 'small':
        eta_transmission = 0.85
    else:  # medium
        eta_transmission = 0.9
    return eta_transmission
6282433d8ce7d78072123b64253c3bdff6eb2017
515,254
import glob


def get_files_list(folder_path, filter_term):
    """Return the paths matching ``folder_path + filter_term`` via glob.

    Note: the two arguments are concatenated as-is, so ``folder_path``
    should end with a path separator (or ``filter_term`` start with one).
    """
    pattern = folder_path + filter_term
    return glob.glob(pattern)
735bddd01a1ac2769c23771378c96dcafae9661a
444,942
def checkIfDuplicates_2(listOfElems):
    """Return True if the given sequence contains any duplicate element.

    Elements must be hashable; the scan stops at the first repeat.
    """
    seen = set()
    for item in listOfElems:
        if item in seen:
            return True
        seen.add(item)
    return False
a7d9f322faefa4b0b0191ca96097bbf38c61ee3d
699,649
def _parse_fasta_input(fasta: str) -> dict: """ The expected input of the protein sequences are in FASTA format. http://genetics.bwh.harvard.edu/pph/FASTA.html Parse the input to retrieve the entries and sequences. :param fasta: the FASTA formatted string. :return: Return a dictionary of each entry and its corresponding sequences. """ entry_dict = {} entries = fasta.split(">") for entry in entries: if entry == "": continue entry = entry.replace("\r", "").replace(" ", "") entry_split_by_newline = entry.split("\n") if len(entry_split_by_newline) < 1: continue entry_dict[entry_split_by_newline[0]] = "".join(entry_split_by_newline[1:]) return entry_dict
9da935f8bd22afecbbbcc890417e9338b729aeb5
402,955
def hyperplane_mem(concept, plane, eta: float = 1e-12) -> bool:
    """
    Test whether a specified atom (hyperplane) contains a concept (a point
    in parameter space): membership holds when the point lies strictly
    below the plane (higher parameter values give a stricter predicate).

    NOTE(review): ``eta`` is accepted but never used — presumably an
    intended tolerance; kept for interface compatibility.
    """
    height = concept[1:] @ plane[:-1] + plane[-1]
    signed_gap = concept[0] - height
    return signed_gap < 0
1055fa8cc5650368834d7a30a8b084407bddf0ae
231,511
def extract_keys(list_of_dicts, *keys):
    """Pivot a list of dicts into parallel lists, one list per given key.

    >>> extract_keys([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], 'a', 'b')
    ([1, 3], [2, 4])
    """
    return tuple([record[key] for record in list_of_dicts] for key in keys)
cb832fcd9834190891235bd09a1c410a306e422b
412,139
import string
import random


def random_int(length: int = 4) -> str:
    """
    Generate a random string of decimal digits.

    :param length: Specified length, default = 4
    :return: str of ``length`` random digits
    """
    # random.choices with k=length draws the same underlying random()
    # sequence as length separate single-element draws.
    return ''.join(random.choices(string.digits, k=length))
860fcc07c63960c77cc9d4f0ddfd2219bda46ce4
238,728
def real_ip(request):
    """
    Return the IP address from the HTTP_X_REAL_IP header, or `None` if
    it is absent. Should handle Nginx and some other WSGI servers that
    set X-Real-IP.
    """
    meta = request.META
    return meta.get('HTTP_X_REAL_IP')
aaec43d8abbf4a330c7ec61e9131dd2ec1591360
336,702