content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def mask_to_cidr(mask):
    """Return the CIDR prefix length for a dotted-decimal IPv4 netmask."""
    # Concatenate the 8-bit binary form of each octet into a 32-bit string.
    bits = "".join(format(int(octet), "08b") for octet in mask.split("."))
    # The prefix length is the number of leading one-bits.
    prefix = 0
    for bit in bits:
        if bit != "1":
            break
        prefix += 1
    return prefix
81cf610029fadc6bd17bd735d3a4a8d0e2e73734
50,964
def _GetEdgeData(faces):
    """Find edges from faces, and some lookup dictionaries.

    Args:
      faces: list of list of int - each a closed CCW polygon of vertex indices
    Returns:
      (list of ((int, int), int), dict{int -> list of int}) - list elements
      are ((startv, endv), face index); the dict maps a start vertex to the
      indices of the edges that leave it
    """
    edges = []
    vtoe = {}
    for face_index, face in enumerate(faces):
        n = len(face)
        for pos, start in enumerate(face):
            end = face[(pos + 1) % n]
            # The edge about to be appended gets index len(edges).
            vtoe.setdefault(start, []).append(len(edges))
            edges.append(((start, end), face_index))
    return (edges, vtoe)
f5c3c283868d5f2af02cbfaceeec3d6f8db01fab
50,965
def __find_service_account_in_message(message):
    """
    The command "gcloud logging sinks create", communicates a service account
    Id as part of its message. Knowing the message format, this function
    extracts the service account Id and returns it to the caller, which will
    grant it with BQ permissions.

    Sample message:
    "Created [https://logging.googleapis.com/v2/projects/hipaa-sample-project/sinks/audit-logs-to-bigquery].
    Please remember to grant `serviceAccount:p899683180883-075251@gcp-sa-logging.iam.gserviceaccount.com` the WRITER role on the dataset.
    More information about sinks can be found at https://cloud.google.com/logging/docs/export/configure_export"

    :param message: the message communicated by "gcloud logging sinks create"
    :return: the service account Id, or None when the message contains none
    """
    tokens = [t for t in message.split() if t.startswith('`serviceAccount:')]
    if not tokens:
        # The original fell through and returned the accumulator list here,
        # handing callers an empty list where a string was expected; return
        # None so the result is always a str or None (both are still falsy).
        return None
    account = tokens[0].replace('`', '')
    return account.replace('serviceAccount:', '')
e791c81d62dae49b6298874dc194eda712e99857
50,967
def pluralize(n: int, singular: str, plural: str) -> str:
    """Select the singular or plural form of a word for the given count."""
    return singular if n == 1 else plural
a239d3d7304ba4fd5505c6bb1bc093392e9065e3
50,969
def some_sample_config(some_sample_path):
    """Return CLI arguments selecting the given sample config path."""
    args = ["--config"]
    args.append(some_sample_path)
    return args
257be0b4fcee1dbd340097a3f219911773ec9a31
50,970
def build_cli_args(archive_filename, out_dir):
    """Build the arguments for a simulator CLI run of a COMBINE/OMEX archive.

    Args:
        archive_filename (:obj:`str`): path to a COMBINE/OMEX archive
        out_dir (:obj:`str`): directory where outputs should be saved

    Returns:
        :obj:`list` of :obj:`str`: command-line arguments
    """
    args = []
    args.extend(['-i', archive_filename])
    args.extend(['-o', out_dir])
    return args
c3f6262d5c65ac62a8ff9d1e03f5dc9b45ba7b08
50,973
from pathlib import Path
from typing import Union


def is_dir(dir_path: Union[Path, str]) -> bool:
    """Report whether *dir_path* refers to an existing directory."""
    path = Path(dir_path)
    return path.is_dir()
6fd5edecc66fe92591ac7fb97d149b6f4bd14890
50,974
import json


def try_parse_json(text):
    """Parse *text* as JSON.

    Returns the parsed object, the original text unmodified if it is not
    valid JSON, or None when *text* is empty/falsy.
    """
    if not text:
        return None
    try:
        parsed = json.loads(text)
    except json.JSONDecodeError:
        return text
    return parsed
18bea77a9913266b7d07d6c2aa77591c0d2b3246
50,975
def find_merge(head1, head2):
    """Return the first node shared by two linked lists, or None.

    INTUITION: if both pointers start equidistant from the merge node,
    advancing them in lockstep finds it. So both lists are measured first
    and the pointer into the longer list is advanced by the length
    difference before the lockstep scan.
    """
    def length(node):
        # Count the nodes reachable from *node*.
        count = 0
        while node:
            count += 1
            node = node.next_node
        return count

    len1 = length(head1)
    len2 = length(head2)

    # Skip ahead in the longer list so both pointers are equally far
    # from the end.
    ptr1, ptr2 = head1, head2
    for _ in range(len1 - len2):
        ptr1 = ptr1.next_node
    for _ in range(len2 - len1):
        ptr2 = ptr2.next_node

    # March both pointers together until they coincide.
    while ptr1:
        if ptr1 == ptr2:
            return ptr1
        ptr1 = ptr1.next_node
        ptr2 = ptr2.next_node
    return None
3288c1bd2c820faced85be276a38cefd509e77d3
50,976
def demo() -> int:
    """Trivial function showing that doctests are executed by pytest.

    >>> demo()
    42
    """
    return 42
30d3f1d85f666cee90e358c125c0ab10b1055ae5
50,977
def get_all_top_level_items(tree_widget):
    """Return every top-level item of the QTreeWidget.

    :param tree_widget: your QTreeWidget
    :return: list of QTreeWidgetItems
    """
    count = tree_widget.topLevelItemCount()
    return [tree_widget.topLevelItem(i) for i in range(count)]
d0dcf2134106b520ef839db0697108fd0e455730
50,987
import json


def get_dict(x):
    """Convert a JSON string into a Python object.

    Args:
        x: JSON String

    Returns:
        The parsed Python value (typically a dictionary).
    """
    parsed = json.loads(x)
    return parsed
a5ec8a4f4561c89d9e623d2268208632110aa4de
50,989
def build_experiments_response(experiments: dict):
    """Build the API response representing a list of experiments.

    Parameters
    ----------
    experiments: dict
        Pairs of ``experiment-name: experiment-version``.

    Returns
    -------
    A JSON-serializable list of ``{"name": ..., "version": ...}`` dicts as
    defined in the API specification.
    """
    return [
        {"name": name, "version": version}
        for name, version in experiments.items()
    ]
0c3098b5e341d30ead27ce94ef3589cf1a5a24f6
50,990
def extract_domains(email_addresses):
    """Collect the unique e-mail domains from a list of addresses.

    Parameters
    ----------
    email_addresses: list, required
        Each element is a dict like ``{"address": "recipient1@domain.test"}``.

    Returns
    -------
    set
        The lower-cased domain part of each address.
    """
    return {
        entry['address'].lower().split('@')[1]
        for entry in email_addresses
    }
17eabfbfc995198c4a0356a755e5d22689b9c581
50,992
def remove_duplicate_base_on_flowcell_id(list_runs):
    """Deduplicate run ids that share a flowcell id.

    When two runs used the same flowcell, only the most recent is kept:
    run ids start with the run date, so plain string comparison of the
    full id orders them chronologically.
    """
    newest_per_flowcell = {}
    for run_id in list_runs:
        # Run id format: <date>_<machine>_<run number>_<stage char><flowcell>
        _date, _machine, _run_number, stage_flowcell = run_id.split('_')
        flowcell = stage_flowcell[1:]
        current = newest_per_flowcell.get(flowcell)
        if current is None or run_id > current:
            newest_per_flowcell[flowcell] = run_id
    return sorted(newest_per_flowcell.values())
f70488e8b6aa8fd05de97e525501eb60c5baf3c9
50,996
def is_toplevel(toplevel) -> bool:
    """Return `True` if `toplevel`-decorated class.

    'True' means 'toplevel' (class or instance) looks decorated with
    '@toplevel': it has a 'fields_sep' attribute and a callable
    'from_path'.
    """
    if not hasattr(toplevel, "fields_sep"):
        return False
    return callable(getattr(toplevel, "from_path", None))
7c781d31667eadc336ef91839075df8e8a8c61db
51,001
def _max_thread_width(thread):
    """Compute the widest breadth of the thread: the max number of direct
    replies any comment in the thread has received."""
    children = thread['children']
    if not children:
        return 0
    widest_child = max(_max_thread_width(child) for child in children)
    return max(widest_child, len(children))
2689347d71177bc39f0b572c7a91782f29be641e
51,004
import six


def text_repr(val):
    """Format values in a way that can be written as text to disk.

    Encode Unicode values as UTF-8 bytestrings (recommended for
    csv.writer), map None to '', and use the str() representation of
    anything else (e.g. numbers).

    Args:
        val (obj): A string-able object, a `unicode` object, or `None`

    Returns:
        str: The string representation of `val`
    """
    if val is None:
        return ''
    if isinstance(val, six.text_type):
        return val.encode('utf8', 'ignore')
    return str(val)
a7396b096f9f32e6b318530a3a1dc8f4be6545a6
51,013
import re


def list_items_to_text(docs):
    """
    Convert HTML list items to plain text.

    The result is in reST(reStructuredText) format, which is suitable for
    Python's Sphinx documentation generator, and is also very human readable.
    """
    docs = docs.strip()
    # Remove any <ul> tags (the <li> tags are all we need)
    docs = re.sub("</?ul[^>]*>", "", docs)
    # Iterate through all the <li> start and end tags, tracking the nested
    # list depth (-1 => not in a list, 0 => in top-level list, ...)
    result = ''
    depth = -1
    end_idx = 0
    for li_match in re.finditer(re.compile("</?li[^>]*>"), docs):
        li_start = li_match.start()
        li_end = li_match.end()
        li_text = li_match.group()
        # Add on the next segment of text. If we're in a list, remove any
        # other HTML tags it contains so list items are plain text.
        segment = docs[end_idx:li_start].strip()
        if depth >= 0:
            segment = re.sub("<[^>]+>", "", segment)
        if segment:
            if depth >= 0:
                # We're in a list, so add a bullet point marker to the first
                # line and align any later lines with the first line's text
                segment = re.sub("(?m)^\\s*", "  ", segment)
                segment = "* " + segment[2:]
                # Add more indentation according to the list nesting depth
                if depth > 0:
                    segment = re.sub("(?m)^", "  "*depth, segment)
            # Add the segment, with a blank line before (and later, after)
            # for compatibility with Sphinx
            if result:
                result += "\n\n"
            result += segment
        end_idx = li_end
        # Track the list nesting depth
        if li_text.startswith("<li"):
            depth += 1
        elif depth >= 0:
            depth -= 1
    # Add the final segment (assumed to not be in a list)
    segment = docs[end_idx:].strip()
    if segment:
        if result:
            result += "\n\n"
        result += segment
    return result
24c7ea0f9e8c39cfcab8b5290fead3e860994e83
51,016
def separators(tree):
    """Map each separator to the junction-tree edges that induce it.

    Args:
        tree (NetworkX graph): A junction tree whose nodes are sets.

    Returns:
        dict: e.g. {sep1: {sep1_edge1, sep1_edge2, ...}, sep2: {...}}
    """
    result = {}
    for edge in tree.edges():
        # The separator is the intersection of the two endpoint cliques.
        sep = edge[0] & edge[1]
        result.setdefault(sep, set()).add(edge)
    return result
652d72f0c62575e9525c38a1fc728f7fcaaf5258
51,017
def get_values_map_keys(records, keyidx=0):
    """Return the unique tuple keys found across all records.

    Given a dict of str -> list of 2-tuples, e.g.:
    {'anat': [('modality', 'anat'), ('image_file', 'anat_hc.nii.gz')],
     'pet':  [('modality', 'pet'),  ('image_file', 'pet_fdg.nii.gz')]}
    or a list of lists of 2-tuples of str, will return the unique values
    at position `keyidx` of every tuple, e.g. {'modality', 'image_file'}.

    Parameters
    ----------
    records: Dict[str, list of 2-tuple] or list of list of 2-tuple
    keyidx: int
        Index inside each tuple to collect.

    Returns
    -------
    keys: set[str]
        (an empty list when `records` is empty or None)
    """
    if not records:
        return []

    if isinstance(records, dict):
        groups = records.values()
    elif isinstance(records, list):
        groups = records
    else:
        raise NotImplementedError('Expected a `dict` or a `list of list` as `records, '
                                  'got {}.'.format(type(records)))

    keys = set()
    for group in groups:
        keys.update(item[keyidx] for item in group)
    return keys
cfecb13857e72014c9ba0472404bffdf5af076d1
51,020
def compare_changes(obj, **kwargs):
    """
    Compare two dicts returning only keys that exist in the first dict
    and carry a different value in the keyword arguments.
    """
    return {
        key: kwargs[key]
        for key, value in obj.items()
        if key in kwargs and kwargs[key] != value
    }
ad88dc60cc3c93d0da15531bf0ef11e7610b1d66
51,022
def asteriskify(text, count=3):
    """Decorate text with asterisks.

    Arguments:
        text {str} -- a text to be decorated
        count {int} -- number of asterisks on each side (default: {3})

    Returns:
        str -- a decorated text
    """
    stars = "*" * count
    return f"{stars} {text} {stars}"
213810b222f3ff55103e36b619cc7157636ea9f6
51,024
import re


def is_snake_case(test_string):
    """Test whether a string is written in 'snake_case'."""
    pattern = "(^[a-z])([a-z0-9]+_?[a-z0-9]?)+([a-z0-9]$)"
    return re.search(pattern, test_string) is not None
a2ece5cc4aab0a7d54b96b1d9d7950d230bdb5eb
51,026
def ADD(*expressions):
    """
    Adds numbers together or adds numbers and a date. If one of the arguments
    is a date, $add treats the other arguments as milliseconds to add to the
    date.
    See https://docs.mongodb.com/manual/reference/operator/aggregation/add/
    for more details
    :param expressions: The numbers or fields of number
    :return: Aggregation operator
    """
    return {'$add': [*expressions]}
ef1cc072f73915e1f228dfeee20258fc51a3149e
51,029
def getAllContribsOutputStrFromSpectraOutput(spectraOutput, energyFmt=None, intensityFmt=None):
    """ Gets a str to write all contributions to spectraOutput (e.g. fragA-S-3p contribution)

    Args:
        spectraOutput: (GenSpectraOutput object) This contains all information for a generated spectrum
        energyFmt: (Str, optional) The format string for the energies. Default = "{:.9g}"
        intensityFmt: (Str, optional) The format string for the intensities. Default = "{:.9g}"

    Returns
        outStr: (Str) String containing data on the contributions to the spectrum
    """
    # Fall back to the general-precision default formats.
    energyFmt = "{:.9g}" if energyFmt is None else energyFmt
    intensityFmt = "{:.9g}" if intensityFmt is None else intensityFmt
    labelList = spectraOutput.label
    dataList = spectraOutput.spectralContributions
    # Get the labels: one "fragKey-eleKey-aoKey" entry per contribution.
    labelStrs = ", ".join(["{}-{}-{}".format(x.fragKey, x.eleKey, x.aoKey) for x in labelList])
    labelStrs = "#labels = " + labelStrs
    outStrList = [labelStrs]
    outStrList.append("#Energy, Intensities")
    # Row format: one energy column followed by one intensity column per contribution.
    dataStrFmt = energyFmt + ", " + ", ".join([intensityFmt for x in range(len(dataList))])
    for idx, x in enumerate(dataList[0]):
        # Energies are taken from the first contribution; intensities from all
        # contributions at the same row index.
        energy = x[0]
        currData = [a[idx][1] for a in dataList]
        outStrList.append(dataStrFmt.format(energy, *currData))
    return "\n".join(outStrList)
03b10fa3bcdc9a0458478da0b602a8eaaa770d33
51,031
def adjust_confidence(score):
    """Adjust confidence when not returned: None maps to full confidence."""
    return 1.0 if score is None else score
0d53d9b7f0cc919d48fb0c91f9d174dd59eef56a
51,032
def hash_string(key, bucket_size=1000):
    """Generate a hash code for a string.

    The hash is ``sum(ord(ch) for ch in key) mod bucket_size``, returned
    as a string.

    Parameters
    ----------
    key: str
        Input string to be hashed
    bucket_size: int
        Size of the hash table.
    """
    total = sum(map(ord, key))
    return str(total % bucket_size)
f62852f7401227a6a998cc10a3b7518a5f9294dc
51,033
import torch


def camera_matrix(pinholes, eps=1e-6):
    """Return the camera intrinsic matrix as a tensor.

    Args:
        pinholes: tensor holding the fx, cx, fy, cy camera parameters.
        eps (float, optional): A small number added for computational
            stability. Defaults to 1e-6.

    Returns:
        torch.Tensor: Intrinsic matrix as a [4,4] Tensor.
    """
    k = torch.eye(4, device=pinholes.device, dtype=pinholes.dtype) + eps
    # Fill the intrinsics with the pinhole values.
    fx, cx, fy, cy = pinholes[0], pinholes[1], pinholes[2], pinholes[3]
    k[..., 0, 0] = fx
    k[..., 0, 2] = cx
    k[..., 1, 1] = fy
    k[..., 1, 2] = cy
    return k
513b23943a019a764533323509f2687df81b08d5
51,034
def biopdbresid_to_pdbresseq(biopdb_residueid, ignore_insertion_codes=False):
    """Convert a Bio.PDB residue id tuple to a PDB residue sequence string.

    Given a Bio.PDB Residue id tuple (hetatm, resseqnum, icode), return the
    PDB residue sequence number string consisting of the sequence number
    and the insertion code, if not blank.

    Parameters:
       biopdb_residueid - tuple (hetatm, resseqnum, icode) from Residue.get_id()
       ignore_insertion_codes - If True, a hack to make it work with PMML
                                (only) which does not report insertion codes
                                unlike DSSP and STRIDE

    Return value:
       string residue PDB sequence number e.g. '60' or '60A'.
    """
    _hetatm, resseqnum, icode = biopdb_residueid
    res_seq = str(resseqnum)
    # A blank icode means "no insertion code".
    if not ignore_insertion_codes and icode != ' ':
        res_seq += icode
    return res_seq
4bb023feb7bbca24f514e041a657752f34d533e0
51,036
import six


def check_utf8(string):
    """
    Validate if a string is valid UTF-8 str or unicode and that it
    does not contain any null character.

    :param string: string to be validated
    :returns: True if the string is valid utf-8 str or unicode and
              contains no null characters, False otherwise
    """
    # Empty / None input is rejected outright.
    if not string:
        return False
    try:
        if isinstance(string, six.text_type):
            encoded = string.encode('utf-8')
            decoded = string
        else:
            encoded = string
            decoded = string.decode('UTF-8')
            # Reject byte strings that do not round-trip through UTF-8.
            if decoded.encode('UTF-8') != encoded:
                return False
        # A UTF-8 string with surrogates in it is invalid.
        #
        # Note: this check is only useful on Python 2. On Python 3, a
        # bytestring with a UTF-8-encoded surrogate codepoint is (correctly)
        # treated as invalid, so the decode() call above will fail.
        #
        # Note 2: this check requires us to use a wide build of Python 2. On
        # narrow builds of Python 2, potato = u"\U0001F954" will have length
        # 2, potato[0] == u"\ud83e" (surrogate), and potato[1] == u"\udda0"
        # (also a surrogate), so even if it is correctly UTF-8 encoded as
        # b'\xf0\x9f\xa6\xa0', it will not pass this check. Fortunately,
        # most Linux distributions build Python 2 wide, and Python 3.3+
        # removed the wide/narrow distinction entirely.
        if any(0xD800 <= ord(codepoint) <= 0xDFFF
               for codepoint in decoded):
            return False
        return b'\x00' not in encoded
    # If string is unicode, decode() will raise UnicodeEncodeError
    # So, we should catch both UnicodeDecodeError & UnicodeEncodeError
    except UnicodeError:
        return False
a43c95cc3263bd092f5a430d80a3d772aa227036
51,037
import math


def binomial(n, k):
    """Calculate the binomial coefficient C(n, k).

    For non-negative integer arguments with k <= n, the result is computed
    exactly with math.comb and converted to float for backward
    compatibility. The previous gamma-function formulation is kept as a
    fallback for any other arguments (e.g. non-integer values), but for the
    common integer case it loses precision once the intermediate gamma
    values grow large.
    """
    if (isinstance(n, int) and isinstance(k, int)
            and 0 <= k <= n):
        # Exact integer arithmetic; float() preserves the original
        # float return type.
        return float(math.comb(n, k))
    # Fallback: original gamma-based computation (also preserves the
    # original error behavior for invalid arguments).
    return math.gamma(n + 1) / (math.gamma(k + 1) * math.gamma(n - k + 1))
dee2c1466e61c5a5397b7eb2ec7b74238b19bec9
51,040
def call_if_callable(v):
    """Preprocess a value: return it; but call it first if it is callable
    (supports late binding via lambdas)."""
    if callable(v):
        return v()
    return v
14ac5ef104338685b592a58e813cf9289bfa6327
51,045
def stream_is_client_initiated(stream_id: int) -> bool:
    """
    Returns True if the stream is client initiated (even stream id).
    """
    return stream_id & 1 == 0
fe320e14f1b11c230903828f83a29a7f809aead7
51,051
import random


def pick_random_move(state):
    """Pick a uniformly random legal move.

    Cells containing None in the board array are the possible moves.

    Args:
        state: sequence representing the board; None marks an empty cell.

    Returns:
        Index into the state array of the next move.

    Raises:
        ValueError: if there are no possible moves (board is full). The
            original raised ValueError too, but via random.randint(0, -1)
            with a confusing message.
    """
    # `is None` instead of `== None`: identity is the correct (and
    # idiomatic) test for the None sentinel.
    possible_moves = [i for i, cell in enumerate(state) if cell is None]
    if not possible_moves:
        raise ValueError("no possible moves: the board is full")
    return random.choice(possible_moves)
783e85dd2682c366ad9c21b2e9a15fcb35ca25d6
51,054
def string2bool(input_string):
    """Convert a string to a boolean.

    Returns True when the text means "true" — any capitalization of
    'true', 'yes' or '1' — and False for anything else (including
    non-string input).

    The original accepted only ['true', 'True', 'Yes', '1'], so e.g.
    'yes' and 'TRUE' were inconsistently rejected; a case-insensitive
    comparison treats all spellings uniformly (strict superset of the
    old behavior).
    """
    if not isinstance(input_string, str):
        # Non-strings never matched the old list either.
        return False
    return input_string.lower() in {'true', 'yes', '1'}
04332faa4559b8bf6506a636c41510e6920e0165
51,055
def remove_doubles(a_list):
    """Simple naive not-efficient function to remove doubles in a list
    but keep the order (first occurrence wins)!"""
    # Membership test on a list (not a set) so unhashable elements work.
    deduped = []
    for element in a_list:
        if element in deduped:
            continue
        deduped.append(element)
    return deduped
6a9b1be053b9f828c4a4439e77b3f7d4db9102e7
51,056
import inspect


def get_class(method):
    """Get the class of the input unbound method.

    Args:
        method (object): an unbound method or function.
    Returns:
        A class of the input method. It will be `None` if the input method
        is a function.
    """
    if inspect.ismethod(method):
        # Bound method: walk the MRO to find the class that actually
        # defines this exact method object.
        for cls in inspect.getmro(method.__self__.__class__):
            if cls.__dict__.get(method.__name__) is method:
                return cls
        method = method.__func__  # fallback to __qualname__ parsing
    if inspect.isfunction(method):
        # Plain function: strip the method name (and any '<locals>' part)
        # from __qualname__ and resolve the remainder in its module.
        cls = getattr(
            inspect.getmodule(method),
            method.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
        if isinstance(cls, type):
            return cls
    # Descriptors (e.g. some builtins) expose their class via __objclass__.
    return getattr(method, '__objclass__', None)
cf7c273ce91080ca7e9657e2ee04eb3b594917f2
51,058
def odd_or_even(number):
    """Determine if a number is odd or even."""
    # A non-zero remainder after dividing by two means the number is odd.
    if number % 2:
        return 'Odd'
    return 'Even'
e7b1a23f0f55244ecf7fa148b4ffa8214d1edf78
51,061
def step(x):
    """Step function: 1 where x > 0, otherwise 0 (works elementwise on
    array-like inputs that support comparison and multiplication)."""
    return (x > 0) * 1
03e770c064d185e2e019d843ee5928c244ef5c76
51,062
def getDistance(interval_a, interval_b):
    """Returns the distance between two intervals (negative when they
    overlap)."""
    gap_ab = interval_a[0] - interval_b[1]
    gap_ba = interval_b[0] - interval_a[1]
    return max(gap_ab, gap_ba)
16fc181560ec01e5bddb7da6fbb911b242126112
51,063
def get_img_fingerprints(gray_dct_ul64_list, gray_dct_ul64_avg):
    """Build an image fingerprint from the top-left 8x8 grayscale block.

    Each of the 64 pixels is compared with the block average: greater
    than the average records '1', otherwise '0'.

    :param gray_dct_ul64_list: 8x8 matrix of top-left grayscale pixels
    :param gray_dct_ul64_avg: sequence whose first element is the mean of
        those 64 pixels
    :return: 64-character fingerprint string of '0'/'1'
    """
    avg = gray_dct_ul64_avg[0]
    return ''.join(
        '1' if gray_dct_ul64_list[row][col] > avg else '0'
        for row in range(8)
        for col in range(8)
    )
68b47ea2243d1811fdd5677bb6f6b56673ec7392
51,069
def string_fx(f, x):
    """Return f(x) in string format.

    Parameters
    ----------
    f : function
        f(x)
    x : float
        value

    Returns
    -------
    str
        string format: f(x) = v
    """
    value = f(x)
    return 'f({}) = {}'.format(x, value)
6cc7c73ae49d4b1568a95a4e4a2cd0cb05d94633
51,073
import re


def re_compile(value):
    """Compile *value* as a regular expression, for use with argparse.

    Argparse expects invalid values to raise TypeError, while re.compile
    raises re.error; this shorthand converts the exception type.
    """
    try:
        compiled = re.compile(value)
    except re.error as exc:
        raise TypeError(exc)
    return compiled
8befbf18c3bf895fbe6e0ed6aecd53fff8388969
51,076
def harmonic(n):
    """Compute the n-th Harmonic number.

    Compute Hn where Hn = 1 + 1/2 + 1/3 + ... + 1/n.

    Parameters
    ----------
    n : int
        n-th number (returns 0 for n < 1)

    Returns
    -------
    hn : float
        harmonic number
    """
    # Bug fix: range(1, n) stopped at n-1, so the 1/n term was dropped and
    # the function actually returned H(n-1); include i == n.
    hn = 0
    for i in range(1, n + 1):
        hn += 1 / i
    return hn
cd4d870b66b5fc037c3b3c11b3c735e91c476c6b
51,077
def _browser_list_string(browser_names):
    """Format browser names as '{browser0,browser1,browser2}'.

    :param browser_names: List of browser names
    :return: String representation of the browser name list
    """
    joined = ','.join(browser_names)
    return '{' + joined + '}'
0a5c3a9516c4a72db7cb947f12a47ac70394c65f
51,083
def fatorial(num, show=False):
    """Compute the factorial of a number.

    :param num: the number whose factorial is computed.
    :param show: (optional) print the calculation while computing.
    :return: the factorial of num.
    """
    result = 1
    # Multiply the factors num, num-1, ..., 1.
    for factor in range(num, 0, -1):
        if show:
            # Print each factor joined by ' x ', ending with ' = '.
            print(factor, end='')
            print(' x ' if factor > 1 else ' = ', end='')
        result *= factor
    return result
9842b09f3e9003362844ca533c18243bf9097e94
51,087
import codecs


def deserializer_with_decoder_constructor(deserialization_func,
                                          decoder_type='utf-8',
                                          decoder_error_mode='replace'):
    """Wrap a deserialization function with string decoding.

    This is important for JSON, as it expects to operate on strings
    (potentially unicode), NOT bytestreams; a decoding step is inserted
    in between.

    :param deserialization_func: The base deserialization function.
    :param decoder_type: The decoder type. Default: 'utf-8'
    :param decoder_error_mode: The decode error mode. Default: 'replace'.
    :return: The deserializer function wrapped with the specified decoder.
    :rtype: bytes | bytearray | str -> T
    """
    decode = codecs.getdecoder(decoder_type)

    def deserialize(payload):
        # getdecoder returns (decoded_object, length_consumed).
        text, _length = decode(payload, decoder_error_mode)
        return deserialization_func(text)

    return deserialize
b66b6aae507385748868b6e738f5239db5e3cc27
51,089
def _default_key(obj):
    """Default key function: the identity."""
    return obj
a5ccb0bb9f0072734f73da8a205fadc549694104
51,096
def a_or_an(s: str) -> str:
    """Return 'a [str]' or 'an [str]' as appropriate.

    'an' is chosen when the string starts with a vowel letter. Unlike the
    original ``s[0]`` indexing, startswith() also handles the empty string
    (which previously raised IndexError) by falling back to 'a'.
    """
    article = "an" if s.startswith(tuple("aeiouAEIOU")) else "a"
    return f"{article} {s}"
e99fad6c4e050abc05964fa6d07ff7d15ac03362
51,101
def getZoneLocFromGrid(gridCol, gridRow):
    """Create a string location from zero-based grid refs, e.g.
    col=0, row=9 -> 'A10'."""
    col_letter = chr(ord('A') + gridCol)
    row_number = gridRow + 1
    return f"{col_letter}{row_number}"
bce2137222d0431a4e6761fee772aaa92cacbfe3
51,107
def second_differences(signal):
    """The mean of the absolute values of the second differences of the raw
    signal.

    NOTE(review): here "second difference" means signal[i+2] - signal[i]
    (the difference two samples apart), as in some signal-feature papers,
    not the discrete second derivative — confirm against the feature spec.
    Requires len(signal) > 2.
    """
    diffs = [abs(signal[i + 2] - signal[i]) for i in range(len(signal) - 2)]
    return float(sum(diffs)) / (len(signal) - 2)
fb394697e922bde829bf86f1ac97139b96afffe2
51,108
def in_range(x, a, b):
    """Tests if a value is in the closed range between a and b;
    a can be greater than or less than b."""
    low, high = (a, b) if a <= b else (b, a)
    return low <= x <= high
206c9c0cffb178267327fe127886f14f5e674740
51,118
def CalculateThePrice(TheUnitPrice, num, freight):
    """Calculate the total price.

    :param TheUnitPrice: unit price
    :param num: quantity
    :param freight: shipping cost
    :return: total price as a string with two decimal places
    """
    # total = unit price * quantity + freight
    total = TheUnitPrice * num + freight
    # Keep two decimal places.
    return '%.2f' % total
c49f10ef48c147c617cbb16a684dd96a59f623bf
51,122
def count_start(tokenizer):
    """A decorator which wraps the given tokenizer to yield (token, start).

    Notice! the decorated tokenizer must take an extra int argument: the
    start position of the input context/sentence within the larger text.

    >>> tokenizer = lambda sentence: sentence.split(' ')
    >>> tokenizer = count_start(tokenizer)
    >>> next(tokenizer('The quick brown fox', 0))
    ('The', 0)
    """
    def wrapper(context, base):
        # Search for each token starting after the previous one, so
        # repeated tokens get their own positions.
        search_from = 0
        for token in list(tokenizer(context)):
            position = context.index(token, search_from)
            search_from = position + len(token)
            yield (token, base + position)
    return wrapper
2f04e09c311102c02b37ca1c49934f49b49178dd
51,125
def ListJoiner(list):
    """Takes in a nested list, returns a list of strings.

    Each inner sequence of strings is joined with single spaces.
    NOTE(review): the parameter name shadows the builtin ``list``; kept
    unchanged so keyword callers are not broken.
    """
    return [" ".join(inner) for inner in list]
0918967a6ae0d561dd216f2b9ad8089dad28f3ef
51,129
def data2_eq(data2):
    """Return a predicate matching objects whose data2 equals *data2*."""
    def matcher(m):
        return m.data2 == data2
    return matcher
4d71f6f88f6ea264dbf1058a200f9700b8ce3574
51,131
import fnmatch


def filefilter(filename):
    """Filter a filename for JPEG extensions and return True on a match.

    The original fnmatch version accepted '*.JPG' but not '*.JPEG' (on
    case-sensitive filesystems), an inconsistent case policy; a
    case-insensitive suffix test treats every capitalization of .jpg and
    .jpeg uniformly (strict superset of the old matches).
    """
    return filename.lower().endswith(('.jpg', '.jpeg'))
c10f25b2f6f334613bb18558a0fdbfb200c3243d
51,134
def get_chipseq_atacseq_qc_summary(quality_metric, qc_type):
    """ Chipseq and Atacseq QCs both have common QC Summary metrics.
    This method calculates the metrics within quality_metric_summary
    calculated property
    """
    def round2(numVal):
        # Round to two decimal places.
        return round(numVal * 100) / 100

    qc_summary = []

    if 'overlap_reproducibility_qc' in quality_metric:
        # Prefer IDR-based reproducibility results when present.
        if 'idr_reproducibility_qc' in quality_metric:
            qc_method = 'idr'
        else:
            qc_method = 'overlap'
        opt_set = quality_metric.get(
            qc_method + "_reproducibility_qc")["opt_set"]
        qc_summary.append({"title": "Optimal Peaks",
                           "value": str(quality_metric.get(qc_method + "_reproducibility_qc")["N_opt"]),
                           "numberType": "integer"})
        qc_summary.append({"title": "Rescue Ratio",
                           "tooltip": "Ratio of number of peaks (Nt) relative to peak calling based" +
                           " on psuedoreplicates (Np) [max(Np,Nt) / min (Np,Nt)]",
                           "value": str(round2(quality_metric.get(qc_method + "_reproducibility_qc")["rescue_ratio"])),
                           "numberType": "float"})
        qc_summary.append({"title": "Self Consistency Ratio",
                           "tooltip": "Ratio of number of peaks in two replicates [max(N1,N2) / min (N1,N2)]",
                           "value": str(round2(quality_metric.get(qc_method + "_reproducibility_qc")["self_consistency_ratio"])),
                           "numberType": "float"})
        qc_summary.append({"title": "Fraction of Reads in Peaks",
                           "value": str(round2(quality_metric.get(qc_method + "_frip_qc")[opt_set]["FRiP"])),
                           "numberType": "float"})
    elif 'flagstat_qc' in quality_metric or 'ctl_flagstat_qc' in quality_metric:
        # Control runs carry the same metrics under 'ctl_'-prefixed keys.
        pref = ''
        if 'ctl_flagstat_qc' in quality_metric:
            pref = 'ctl_'
        # mitochondrial rate (only for ATAC-seq)
        if qc_type == 'QualityMetricAtacseq':
            total = quality_metric.get(pref + "dup_qc")[0]["paired_reads"] + \
                quality_metric.get(pref + "dup_qc")[0]["unpaired_reads"]
            nonmito = quality_metric.get(pref + "pbc_qc")[0]["total_read_pairs"]
            mito_rate = round2((1 - (float(nonmito) / float(total))) * 100)
            qc_summary.append({"title": "Percent mitochondrial reads",
                               "value": str(mito_rate),
                               "numberType": "percent"})
        qc_summary.append({"title": "Nonredundant Read Fraction (NRF)",
                           "value": str(round2(quality_metric.get(pref + "pbc_qc")[0]["NRF"])),
                           "tooltip": "distinct non-mito read pairs / total non-mito read pairs",
                           "numberType": "float"})
        qc_summary.append({"title": "PCR Bottleneck Coefficient (PBC)",
                           "value": str(round2(quality_metric.get(pref + "pbc_qc")[0]["PBC1"])),
                           "tooltip": "one-read non-mito read pairs / distinct non-mito read pairs",
                           "numberType": "float"})
        # read1 is set for paired-end data; fall back to total for single-end.
        final_reads = quality_metric.get(
            pref + "nodup_flagstat_qc")[0]["read1"]  # PE
        if not final_reads:
            final_reads = quality_metric.get(
                pref + "nodup_flagstat_qc")[0]["total"]  # SE
        qc_summary.append({"title": "Filtered & Deduped Reads",
                           "value": str(final_reads),
                           "numberType": "integer"})
    return qc_summary if qc_summary else None
448242a2af9f1f7a37840b8f488dd5c4d702dd79
51,136
def info(text):
    """Create a pretty informative string from text (green ANSI color)."""
    return "\033[92m{}\033[m".format(text)
15f0ebda91ce2de8a47d488cbb8b5644bde96b8f
51,138
def deltas(errors, epsilon, mean, std):
    """Compute mean and std deltas.

    delta_mean = mean - mean(all errors below epsilon)
    delta_std = std - std(all errors below epsilon)

    Returns (0, 0) when no error is at or below epsilon.
    """
    below = errors[errors <= epsilon]
    if len(below) == 0:
        return 0, 0
    delta_mean = mean - below.mean()
    delta_std = std - below.std()
    return delta_mean, delta_std
b91d95b09eb3a138f2d6323ac818a90be5e0d534
51,139
def parse_mbld(s: str) -> float:
    """
    Turns the WCA multiblind format into a number: the solved-score base
    (99 minus the first two digits) plus a bonus for finishing under an
    hour (digits 2..6 are the elapsed time in seconds).
    """
    score_base = 99 - int(s[0:2])
    minutes = int(s[2:7]) / 60
    time_bonus = max((60 - minutes) / 60, 0)
    return score_base + time_bonus
f59ac36557d565e1a3acbe0947b8d6830cf23b70
51,144
def _map_args_test(x: float, y: float = 2, z: float = 3) -> float:
    """A test function for the map_args function.

    Returns the sum of x, y and z."""
    total = x + y
    total += z
    return total
20e63e40c0c98151ef6199776c1c6296ce5b39b8
51,148
import re


def toIsoDateTime(value):
    """
    Convert a datetime to an ISO8601 formatted dateTime string.

    :param value: the dateTime to convert
    :returns: an ISO8601 formatted string version of the dateTime
    """
    formatted = value.isoformat()
    if value.tzinfo is None:
        # Naive datetimes are labelled as UTC.
        return formatted + 'Z'
    # Replace a +00:00 / -00:00 suffix with the 'Z' shorthand.
    return re.sub('[+-]00:00$', 'Z', formatted)
1e029fb6b6ad60cb20bb6cc134c448d4a4f89a15
51,156
def read_sequence(filepath, labels=True):
    """
    Return the list of protein sequences and cleavage sites from datapath.
    The data file must strictly follow the following pattern:
    - first line is a description of the sequence
    - second line is the sequence itself
    - third line is a list of chars S, C, M

    Example:
    52 AGP_ECOLI      22 GLUCOSE-1-PHOSPHATASE PRECURSOR (EC 3.1.3.10) (G1PASE).
    MNKTLIAAAVAGIVLLASNAQAQTVPEGYQLQQVLMMSRHNLRAPLANNGSV
    SSSSSSSSSSSSSSSSSSSSSSCMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
    """
    protein_sequence = []
    cleavage_site = []
    with open(filepath, 'r') as f:
        # Loop condition conveniently discards the description lines.
        # Bug fix: the original tested `f.readline() is not ''`, an identity
        # comparison with a literal that only works through CPython string
        # interning (and is a SyntaxWarning since Python 3.8); use equality.
        while f.readline() != '':
            # Slicing with :-1 to discard the "\n" character
            protein_sequence.append(f.readline()[:-1])
            if labels:
                cleavage_site.append(f.readline()[:-1])
    return protein_sequence, cleavage_site
7df9a6ed25dc9b3f6d9005d84ddc375f30a55da4
51,158
def clean_for_viaf(text):
    """Normalize an author string for VIAF lookup.

    Assumes cleaned author form (see supporting_functions.clean_authors).
    Lower-cases, removes '.' and '"', and drops single-character tokens
    (abbreviations).
    :param text: input
    :return: cleaned string
    """
    normalized = text.strip().lower().replace(".", "").replace('"', "")
    kept_tokens = [token for token in normalized.split() if len(token) > 1]
    return " ".join(kept_tokens)
c18abadeae4ebc015030793f64a9babd910a3f1e
51,159
def build_risk_assessment_counter(years):
    """Build a risk counter.

    Args:
        years (int): No. of years for analysis

    Returns:
        risk_counter (list): years+1 zeros for keeping track of
        bankruptcy events
    """
    return [0] * (years + 1)
677d548471f7dfa825f92ac948d91e54c4a0d21c
51,165
def _retrieve_start_nodes(net, mc): """ retrieve nodes with in_degree == 0 as starting points for main path analysis args: net: a CitationNetwork object mc: minimum citations returns: list of start nodes in net """ return [node for node in net.nodes if (net.in_degree(node) == 0 and net.out_degree(node) > mc)]
ff31c50a16a24efa37fbe53f0ba4b7bd9195cb0d
51,167
from typing import List


def get_corpus(documents: List[List[str]]) -> List[str]:
    """Get a list of all of the words in each document (includes duplicates).

    Args:
        documents (List[List[str]]): List where each element is a list of
            tokens in a given document.

    Returns:
        List[str]: List of all of the tokens appearing in each document,
            in order.
    """
    corpus: List[str] = []
    for document in documents:
        corpus.extend(document)
    return corpus
322fca0944b74bfcebf38eff3efe8aa469882e54
51,168
import json


def format_file_data_into_json(data_file):
    """input: data_file (sequence ids separated by \n)
    output: json request structure of gene ids to pass to datasets api
    """
    with open(data_file, "r") as handle:
        raw = handle.read()
    gene_ids = [int(line) for line in raw.strip().split("\n")]
    payload = {'gene_ids': gene_ids,
               'include_annotation_type': ['FASTA_PROTEIN']}
    return json.dumps(payload)
7f646c511a6433f88f59909d281b9ae044e45c82
51,169
def bottom_up(num_steps: int) -> int:
    """Compute number of ways to climb the stairs, bottom up.

    Iterative tribonacci-style recurrence: avoids the recursion-depth limit
    and keeps only three rolling values.  (Strictly, those ints grow with
    num_steps, so the true space cost tracks the size of the numbers.)

    Args:
        num_steps: number of total steps

    Returns:
        The number of possible ways to climb the stairs
    """
    # Base cases: 1 step -> 1 way, 2 -> 2 ways (0 and negatives pass through)
    if num_steps <= 2:
        return num_steps
    # ways[k] = ways to climb (num_steps-2+k) steps; seeded for steps 1..3
    window = [1, 2, 4]
    for _ in range(num_steps - 3):
        window = [window[1], window[2], sum(window)]
    return window[2]
8645158380859b5fe8a84ee8b5ac5059437ecdb5
51,171
from typing import Dict


def get_line_from_file(input_filename: str) -> Dict[int, int]:
    """ Read comma seperated integers all on the same line.

    Returns a mapping from position index (0-based) to integer value.
    """
    with open(input_filename) as input_file:
        first_line = input_file.readline()
    return dict(enumerate(int(field) for field in first_line.split(",")))
3274eeb5a36f9ad4b460a927396e5f02a91c5c88
51,172
def calc_offset(actpre, actcount):
    """Compute the ADC-count offset implied by a known pressure reading.

    Inverts the line y = m*x + b (y = pressure in kPa, x = ADC counts).
    The sensor is 54 mV/kPa at a 5.1 V supply, rescaled to the 5 V ADC
    reference with 255 counts full scale:

        sensitivity = 54 * 5/5.1          (mV per kPa)
        m           = 5000/255/sensitivity (kPa per count)
        b           = actpre - m*actcount  (kPa)

    :param actpre: measured barometric pressure in kPa
    :param actcount: ADC count corresponding to actpre
    :return: offset expressed in ADC counts (b / m)
    """
    sensitivity = 54 * 5 / 5.1             # mV/kPa
    slope = 5000 / 255 / sensitivity       # kPa/count
    intercept = actpre - slope * actcount  # kPa
    return intercept / slope               # counts
3f92d044b15df5dea3cddedfa952e81e396e92ec
51,178
import torch


def default_device() -> str:
    """
    :return: the device that should be defaulted to for the current setup.
        if multiple gpus are available then will return a string with all of
        them, else if single gpu available then will return cuda, else
        returns cpu
    """
    if not torch.cuda.is_available():
        return "cpu"
    gpu_count = torch.cuda.device_count()
    if gpu_count < 2:
        return "cuda"
    # enumerate every gpu id: "cuda:0,1,...,n-1"
    return "cuda:" + ",".join(str(idx) for idx in range(gpu_count))
7ef23c2c39609d1188fb9f0f2197d0cb44821051
51,183
def get_image_representation(img_x, embedding_net):
    """
    Return image representation (i.e., semantic features) for image
    features given embedding network.
    """
    # the embedding network is simply applied as a callable
    features = embedding_net(img_x)
    return features
e8bda52c7edd60f77951513a5e078f97de93f623
51,192
def bbox_size(bbox):
    """Calcs bounding box width and height.

    :type bbox: list
    :param bbox: bounding box as (x0, y0, x1, y1)

    :rtype: tuple
    :return: width and height (absolute, so corner order doesn't matter)
    """
    left, top, right, bottom = bbox
    width = abs(right - left)
    height = abs(bottom - top)
    return width, height
1032ab37e5b05e38f67121e974354ea2fcdf9385
51,193
def getFeedRate(rpm, chipLoad, numTeeth):
    """
    Calculates the feedrate in inches per minute

    args:
        rpm = spindle speed
        chipLoad = chip load (inches per tooth)
        numTeeth = number of teeth on tool
    """
    # feed (in/min) = rev/min * in/tooth * teeth/rev
    return rpm * chipLoad * numTeeth
e603bdb066a53ec6efd956871a12a08a40eaf971
51,194
def stringToWordList(s):
    """Returns t.split() where t is obtained from s.lower() by replacing
    every non-letter in this string with a blank.  Basically, it returns a
    list of the 'words' (sequences of ASCII letters) in s.

    PreC: s is a string.
    """
    letters = 'abcdefghijklmnopqrstuvwxyz'
    # map each non-letter to a space so split() yields the letter runs;
    # the explicit a-z set (not isalpha) keeps non-ASCII letters excluded
    blanked = ''.join(c if c in letters else ' ' for c in s.lower())
    return blanked.split()
166d192a6e3701431fc00617a9d667f0cf39a63a
51,198
def reflect_mpo_2site(mpo_2site):
    """Spatial reflection of a 2-site MPO.

    Returns the sites as a tuple in reversed order.
    """
    return tuple(mpo_2site[::-1])
8158a5c5c58f0e96716c3cc1dd26663c08fd6893
51,205
import math


def manning_equation(hydraulic_radius, manning_coefficient, slope):
    """Manning formula estimating the average velocity of a liquid driven
    by gravity.

    :param hydraulic_radius: hydraulic radius of pipe or channel [m]
    :param manning_coefficient: Gauckler-Manning coefficient
    :param slope: slope of the hydraulic grade line [-]
    """
    radius_term = math.pow(hydraulic_radius, 2 / 3)
    slope_term = math.pow(slope, 0.5)
    return radius_term * slope_term / manning_coefficient
313db8e85c74d2f346b24617ea6228627f589e57
51,213
def monpow(a, b):
    """Return a raised to the power b.

    :math:`a^b = a \\cdot a \\cdots a` (b times, for positive integer b)

    :param a: the base value
    :param b: the exponent
    :type a: int, float, ...
    :type b: int, float, ...
    :returns: a**b
    :rtype: int, float

    :Examples:

    >>> monpow(2, 3)
    8
    >>> monpow(2., 2)
    4.0

    .. seealso:: pow
    .. warning:: a and b must be numbers
    """
    return pow(a, b)
30f7b5c0ff08082594e2e2cf62509bc1c0742627
51,215
def is_int(val):
    """
    Check if val is an int, or a numeric value representing a whole number

    Parameters
    ----------
    val: value to check

    Returns
    -------
    True if val is an int, or if val.is_integer() is truthy (e.g. 3.0);
    False otherwise.  Values without an is_integer() method (e.g. strings)
    now return False instead of raising AttributeError.
    """
    # `type(val) is int` (not ==) for the exact-type check; this still
    # deliberately excludes bool, matching the original behavior.
    if type(val) is int:
        return True
    try:
        return bool(val.is_integer())
    except AttributeError:
        # not a float-like value at all
        return False
0c33396973ff601deae19e1f47352c45ca3269a6
51,217
def readable(filename: str) -> bool:
    """Conditional method to check if the given file (via filename) can be
    opened for reading in this thread.

    :param filename: name of the file
    :type filename: str
    :return: true if file can be read, otherwise false
    :rtype: bool
    """
    # The original checked handler.closed immediately after open() -- that
    # is always False, so the inversion dance was dead logic -- and leaked
    # the handle if anything raised before close().  A context manager
    # guarantees the handle is released.
    try:
        with open(filename, 'r'):
            return True
    except OSError:
        # File doesn't exist, is a directory, or permissions deny reading.
        # (IOError is an alias of OSError in Python 3.)
        return False
997dbd3f42ed432109169fb98b3d66f3f53341ba
51,220
def get_child_object(obj, child_name):
    """Return the named child of obj, trying attribute access first and
    falling back to item (subscript) access.

    Arguments:
        obj {object} -- parent object
        child_name {str} -- child name

    Returns:
        object -- the child, or None when it cannot be found either way
    """
    # Attribute lookup: getattr covers both __getattr__ and
    # __getattribute__ paths the original probed separately.
    try:
        return getattr(obj, child_name)
    except AttributeError:
        pass
    # Container lookup.  The original only caught AttributeError here, so a
    # missing key/index escaped as KeyError/IndexError instead of yielding
    # None; catch what __getitem__ actually raises (LookupError covers
    # KeyError and IndexError, TypeError covers non-subscriptable objects).
    try:
        return obj[child_name]
    except (TypeError, LookupError):
        return None
d9404e09cdeaaf755c75675e6c0dc42f5fc7adf2
51,221
import re


def clean(text):
    """Cleans text by: (1) removing obviously non-Coptic text; (2) turning
    sequences of >=1 newline into a single newline; (3) turning sequences
    of >=1 space into a single space; (4) spacing out ., ·, and :

    :param text: A string of Coptic text
    :return: Cleaned Coptic text
    """
    # (4) add breathing room around punctuation marks
    cleaned = text.replace(".", " .").replace("·", " ·").replace(":", " : ")
    # (1) strip everything recognisably non-Coptic
    non_coptic_patterns = [
        r'\[F[^]]+\]',    # Square brackets if they start with F
        r'\|F[^|]+\|',    # Pipe delimiters if they start with F
        r'\([^\)]+\)',    # Anything in round brackets
        r'[A-Za-z0-9|]',  # Latin or numbers, pipe
    ]
    cleaned = re.sub("(" + "|".join(non_coptic_patterns) + ")", '', cleaned)
    # (2) collapse newline runs, then (3) collapse space runs
    cleaned = re.sub(r"\n+", r"\n", cleaned)
    return re.sub(r" +", r" ", cleaned)
1088dc0fccb66790f622a05d58027ca3635c8134
51,223
def ping_time_to_distance(time, calibration=None, distance_units='cm'):
    """ Calculates the distance (in cm) given the time of a ping echo.

    By default it uses the speed of sound (at sea level = 340.29 m/s) to
    calculate the distance, but a list of calibrated points can be used to
    calculate the distance using linear interpolation.

    :arg time: echo round-trip time in microseconds; falsy values (0 or
        None) short-circuit to a distance of 0.
    :arg calibration: A sorted list of (time, distance) tuples to calculate
        the distance using linear interpolation between the two closest
        points.  Example (for a HC-SR04 ultrasonic ranging sensor):
        [(680.0, 10.0), (1460.0, 20.0), (2210.0, 30.0)]
    :arg distance_units: NOTE(review): accepted but never read -- results
        are always in cm; confirm whether unit conversion was intended.
    """
    if not time:
        return 0
    if not calibration:
        # Standard calculation using speed of sound.
        # 1 (second) / 340.29 (speed of sound in m/s) = 0.00293866995 metres
        # distance = duration (microseconds) / 29.38 / 2 (go and back)
        distance = time / 29.3866995 / 2
    else:
        # Linear interpolation between two calibration points.
        # a: last calibration point strictly below `time` (defaults to the
        #    origin); b: first point strictly above `time` (defaults to the
        #    last point when `time` exceeds the whole table).
        a = (0, 0)
        b = calibration[-1]
        for c in calibration:
            if c[0] < time:
                a = c
            if c[0] > time:
                b = c; break
        if a == b:
            # `time` lies beyond the last calibration point: both ends
            # collapsed onto calibration[-1], so extrapolate from the final
            # segment instead of dividing by zero below.
            a = calibration[-2]
        distance = a[1] + (b[1] - a[1]) * ((time - a[0]) / (b[0] - a[0]))
    return distance
db8cba30c8d50d301b6a0e15073c527e285d4aa6
51,225
def pack_str(string):
    """Pack a string into a byte sequence (UTF-8 encoded)."""
    # encode() defaults to UTF-8; spelled out here for clarity
    return string.encode("utf-8")
5ef0e1f41db1a242c8a67a90d32397f270e2ce4e
51,230
import token def _get_definition_tokens(tokens): """ Given the tokens, extracts the definition tokens. Parameters ---------- tokens : iterator An iterator producing tokens. Returns ------- A list of tokens for the definition. """ # Retrieve the trait definition. definition_tokens = [] first_line = None for type, name, start, stop, line_text in tokens: if first_line is None: first_line = start[0] if type == token.NEWLINE: break item = ( type, name, (start[0] - first_line + 1, start[1]), (stop[0] - first_line + 1, stop[1]), line_text, ) definition_tokens.append(item) return definition_tokens
f5ffe1b5757828742777d8678fdbd4738d227aa8
51,231
def _get_ancestors(cube): """Extract ancestors from ``filename`` attribute of cube.""" ancestors = cube.attributes['filename'].split('|') return ancestors
35e5e70ea3b72e055895a9e66b4eff262ec24163
51,236
def flatten_test_results(trie, prefix=None):
    """Flattens a trie structure of test results into a single-level map.

    Flattening stops at any nonempty node that has an 'actual' or
    'expected' child key; deeper names are joined with '/'.  For example

        {'foo': {'bar': {'expected': 'good', 'actual': 'bad'}}}

    flattens to

        {'foo/bar': {'expected': 'good', 'actual': 'bad'}}

    :param trie: nested dict of test results
    :param prefix: path accumulated by recursive calls (leave as None)
    :returns: flat dict mapping 'a/b/c' paths to leaf result dicts
    """
    # Cloned from webkitpy.layout_tests.layout_package.json_results_generator
    # so that this code can stand alone.
    result = {}
    # .iteritems() was Python 2 only and raises AttributeError on Python 3;
    # .items() is the direct equivalent.
    for name, data in trie.items():
        if prefix:
            name = prefix + '/' + name
        # Recurse into nonempty nodes that are not yet leaf results
        # (a leaf has an 'actual' or 'expected' child key).
        if len(data) and 'actual' not in data and 'expected' not in data:
            result.update(flatten_test_results(data, name))
        else:
            result[name] = data
    return result
8204b5a2ccec23cb8323b6709979114fef12633e
51,237
def calculate_reward(state, v):
    """Calculate reward for player v based on current state.

    Returns a (reward, status) pair: -1.0 while the game is undecided
    (status 0), +2.0 when player v is the winner, -2.0 otherwise.
    """
    status = state.get_status()
    if status == 0:
        reward = -1.0
    elif status == v:
        reward = 2.0
    else:
        reward = -2.0
    return (reward, status)
5d575afbb34b56f8097766f2027596e2b6759238
51,239
def continue_to_get_objects(vim, token):
    """Continues to get the list of objects of the type specified.

    Passes the pagination *token* from a previous retrieval back to the
    property collector to fetch the next page of results.
    """
    collector = vim.service_content.propertyCollector
    return vim.ContinueRetrievePropertiesEx(collector, token=token)
6f5632a9f7f1ea2da5be76088c5565d101f32114
51,241
def RC(annotation, prediction):
    """ Kendall rank correlation coefficient.

    Compares every ordered pair of predictions: a pair is concordant when
    the earlier prediction has the smaller annotation value (ties count as
    discordant).  Returns (concordant - discordant) normalised by the
    number of pairs, in [-1, 1].
    """
    n = len(prediction)
    number_con = 0
    number_dis = 0
    for i in range(n):
        for j in range(i + 1, n):
            if annotation[prediction[i]][0] < annotation[prediction[j]][0]:
                number_con += 1
            else:
                number_dis += 1
    return (number_con - number_dis) / n / (n - 1) * 2
d2ab2af4333bc8cfb43b69e5291f7e0002296717
51,246
def human_readable(solutions):
    """Print letter solution in a human-readable way

    Parameters
    ----------
    solutions : pandas.DataFrame
        Letter draw best solutions; must provide "nb_letters" and
        "display" columns.

    Returns
    -------
    str
        Human-readable version of `solutions`
    """
    chunks = []
    # groupby yields the groups in ascending "nb_letters" order
    for nb_letters, group in solutions.groupby("nb_letters"):
        chunks.append("{} letters:\n".format(nb_letters))
        chunks.append(" ".join(group["display"]) + "\n\n")
    return "".join(chunks)
becca40e87db069eaf256e6514f6d47b05e7e1f0
51,247
# collections.Counter, not the deprecated typing.Counter alias
from collections import Counter


def label_sizes(pages):
    """
    pages: list(Page)
    returns: dict[str: int]

    For all labels appearing in the page list, assign each label a "size",
    from 1 to 5, based on the total number of pages with that label.
    (A larger size means the label contains more pages.)
    """
    # Note: this is basically calculating a 5-bin histogram, but I didn't
    # want to add another dependency like numpy just to do this.
    # (The original also built an unused list of "landing_for" values here;
    # that dead code has been removed.)
    label_counts = Counter()
    for page in pages:
        if "labels" in page:
            label_counts.update(page["labels"])
    if not label_counts:
        return {}
    total_labels = len(label_counts)
    label_sizemap = {}
    size = 5
    # Walk labels from most to least common, stepping down one size bucket
    # each time the remaining-rank fraction crosses a quintile boundary.
    for i, (label, _count) in enumerate(label_counts.most_common()):
        if 1 - (i / total_labels) < (size - 1) / 5:
            size -= 1
        label_sizemap[label] = size
    return label_sizemap
785ac402645eee2ac91a66d538f39e8e64f56fa3
51,248
from typing import Iterable


def validation_failed_dict(
    items: Iterable,
    dict_code: int = 1001,
    dict_message: str = 'Validation Failed'
):
    """Generate dict for failed validation in format of DRF-friendly-errors.

    Attributes:
        items: Items to put into dict. In format: [(code, field, message), ...]

    Returns:
        dict: In format of DRF
    """
    errors = [
        {'code': item[0], 'field': item[1], 'message': item[2]}
        for item in items
    ]
    return {'code': dict_code, 'message': dict_message, 'errors': errors}
01de1c3bc47a4a44005b3e7cd36cf9f5444b7d62
51,253
def _train_and_score(clf, X, y, train, test): """ Fit a classifier clf and train set and return the accuracy score on test set""" clf.fit(X[train], y[train]) return clf.score(X[test], y[test])
886397f99ff3b21cc62100269c9b2c8d5bca8669
51,257
import re


def camelize(s):
    """
    Convert underscores to camelcase; e.g. foo_bar => FooBar

    :param s: snake_case string
    :return: CamelCase string; the empty string is returned unchanged
        (the original indexed s[0] and raised IndexError on "").
    """
    if not s:
        return s
    return s[0].upper() + re.sub(r'_([a-z])', lambda m: m.group(1).upper(), s[1:])
ff1a6d87f1d10171276be4ae8529850a23a93df9
51,263
def format_step(action_id, step, index, notes):
    """ reformat a step (dictionary) into a common response format """
    # task_id doubles as the step id and the URL path segment
    task_id = step.get('task_id')
    return {
        'url': '/actions/{}/steps/{}'.format(action_id, task_id),
        'state': step.get('state'),
        'id': task_id,
        'index': index,
        'notes': notes
    }
58dec56de2f554d1736a639b5b217ebccca63265
51,267
def manage_groups(dset):
    """
manage_groups
    description:
        Prompts the user with options to manage group assignments on the
        Dataset instance:
            - Assign indices to a group
            - View assigned group indices
            - Get data by group
        Returns a boolean indicating whether the user is finished with
        assigning groups (this function gets called again if a falsy value
        is returned; note the "assign" branch falls off the end and returns
        None, which the caller treats the same as False).
    parameters:
        dset (lipydomics.data.Dataset) -- lipidomics dataset instance
    returns:
        (bool) -- finished managing groups
"""
    print('Managing groups... What would you like to do?')
    print("\t1. Assign group")
    print("\t2. View assigned groups")
    print("\t3. Get data by group(s)")
    print('\t"back" to go back')
    option = input('> ')
    if option == "1":
        # Assign group: parse "<name> <start> <end>" and expand the
        # inclusive index range.
        print("Please provide a name for a group and its indices in order of name > starting index > ending index."
              "\n\t* group name should not contain spaces\n\t* indices start at 0\n\t* example: 'A 1 3'")
        group = input('> ')
        group = group.split()
        name = group[0]
        # inclusive range [start, end]
        indices = [_ for _ in range(int(group[1]), int(group[2]) + 1)]
        try:
            dset.assign_groups({name: indices})
            print('! INFO: Assigned indices: {} to group: "{}"'.format(dset.group_indices[name], name))
        except ValueError as ve:
            # assign_groups rejected the input (e.g. bad indices)
            print('! ERROR:', ve)
            print("! ERROR: Failed to assign group, please check your formatting and try again")
        # NOTE(review): this branch falls through without an explicit
        # return (implicitly None -> caller loops again) -- presumably
        # intentional, but confirm.
    elif option == "2":
        # View assigned groups: dump every group name with its indices.
        for group in dset.group_indices:
            print('\t"{}": {}'.format(group, dset.group_indices[group]))
        return False
    elif option == "3":
        # Get data by group: print the data slice for a named group.
        print("Which group would you like to view?")
        name = input('> ')
        print(dset.get_data_bygroup(name))
        return False
    elif option == 'back':
        # user is done managing groups
        return True
    else:
        print('! ERROR: unrecognized option: "{}"'.format(option))
        return False
92b96bcfb387018c4148329cea577fe19c5c9290
51,270