content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import inspect


def is_enum(e: type) -> bool:
    """Check whether *e* is a Boost.Python enumeration class.

    Args:
        e: The object to check.

    Returns:
        True when ``e`` is a class whose MRO contains the
        ``Boost.Python.enum`` base class; False otherwise (including for
        plain Python enums and non-class objects).
    """
    # Non-classes can never be Boost.Python enums.
    if not isinstance(e, type):
        return False
    # Walk the MRO looking for the Boost.Python enum base class.
    return any(
        f"{base.__module__}.{base.__name__}" == "Boost.Python.enum"
        for base in inspect.getmro(e)
    )
818ec32ed6b5d6a2b83c474ce6aad2982ac81736
105,348
def _inline_single_path(path, all_paths):
    """Inline a path.

    * ``path`` must be in primitive recursive form:
      ``[(src, n1, e_id1), (n1, dest, None)]``
    * ``all_paths`` must contain all paths in the graph that start at node
      ``n1``, in aggregated and inlined form.

    If there is exactly one path from ``n1`` to ``dest``, an inlined
    representation is returned: ``[(src, n1, e_id1), edges of that path]``.
    Otherwise ``path`` is returned unchanged (inlining would generate
    multiple paths).
    """
    if len(path) == 1:
        return path
    first_edge, (start, end, edge_id) = path
    # The trailing edge must be the unresolved placeholder.
    assert edge_id is None
    continuations = all_paths[start][end]
    if len(continuations) == 1:
        # Exactly one continuation: splice it in after the first edge.
        return [first_edge] + continuations[0]
    return path
9b7f45f4b9dbceda77ccf73b4f8efbd642f8108f
105,354
def _extractTildeSeparatedPair(string):
    """Split a ``revision~timestamp`` string into its two components.

    The ``~timestamp`` component is optional; when absent,
    ``(revision, None)`` is returned.
    """
    revision, tilde, timestamp = string.partition('~')
    if not tilde:
        # No '~' present: the whole string is the revision.
        return string, None
    return revision, timestamp
0c840a9fae6a3925d754b38f119087ff86868ecd
105,356
def make_binary(df, cols):
    """Convert continuous columns to binary (0/1) columns.

    Parameters
    ----------
    df : pandas.DataFrame
        Input frame; it is not modified.
    cols : list of str
        Columns to binarise. Each name is assumed to start with a
        4-character prefix (e.g. ``'yrs_'``) that is dropped when the
        column is renamed to ``'<rest>_binary'``.

    Returns
    -------
    pandas.DataFrame
        Copy of ``df`` with the given columns mapped to 1 where the value
        is greater than 0 (else 0), and renamed accordingly.
    """
    # Work on a copy so the caller's frame is untouched.
    binary_df = df.copy()
    # Vectorised comparison instead of the deprecated, element-wise
    # DataFrame.applymap: 1 where the value is greater than 0, otherwise 0.
    binary_df[cols] = (binary_df[cols] > 0).astype(int)
    # Rename in one pass: drop the 4-char prefix and append '_binary'.
    binary_df.rename(columns={col: col[4:] + '_binary' for col in cols}, inplace=True)
    return binary_df
7bb19d08f9116921706dc93738711e5fa7295af6
105,357
def parse_db_list(dbstr):
    """Parse a customer-provided comma-separated database list into a
    space-separated string.

    Args:
        dbstr (str): comma-separated database names, or None.

    Returns:
        str: "ALL" when the input is empty/None/"all" (any case);
        otherwise the space-separated names with "mysql" appended.
    """
    # Empty / missing / explicit "ALL" all collapse to the sentinel.
    if dbstr is None or dbstr.strip() == "" or dbstr.strip().upper() == "ALL":
        return "ALL"
    names = [name.strip() for name in dbstr.strip().split(",")]
    # The mysql system database is always included.
    names.append("mysql")
    return " ".join(names).strip()
02da0735effdc1203d5c325dd4c4751da7380b06
105,360
import math


def get_long_lat(x, y, z):
    """Inverse of ``get_coord``: recover (longitude, latitude, radius)
    from Cartesian coordinates, with latitude = asin(y/R) and
    longitude = atan2(x, z), both in degrees.

    >>> get_long_lat(0.0, 20.0, 0)
    (0.0, 90.0, 20.0)
    >>> get_long_lat(10.0, 14.14213562373095, 10.000000000000002)
    (44.99999999999999, 44.99999999999999, 20.0)
    """
    radius = math.sqrt(x ** 2 + y ** 2 + z ** 2)
    try:
        latitude = math.degrees(math.asin(y / radius))
        longitude = math.degrees(math.atan2(x, z))
        if longitude > 90:
            # Fold longitudes past 90 degrees back into range.
            longitude = (longitude - 90) * -1
    except ZeroDivisionError:
        # Degenerate point at the origin.
        longitude = 0
        latitude = 0
    return longitude, latitude, radius
e8eb1bab48d89f546784f2c4f5881d94640dd889
105,362
def __lt__(self, other):  # supports syntax S < T
    """Return True if this set is a proper subset of ``other``."""
    # A proper subset must be strictly smaller than the other set.
    if len(self) >= len(other):
        return False
    # Every element of self must also be present in other.
    return all(element in other for element in self)
ef92e1f9bb01e8de4bd862530bbd35ddd1e728b7
105,364
def crop_boxes(boxes, x_offset, y_offset):
    """Shift boxes by the crop offsets.

    Args:
        boxes (array): boxes to crop; columns 0 and 2 hold x coordinates,
            columns 1 and 3 hold y coordinates. Modified in place.
        x_offset (int): offset subtracted from the x columns.
        y_offset (int): offset subtracted from the y columns.

    Returns:
        The same array with the offsets applied.
    """
    # Subtract each offset from its pair of coordinate columns.
    for columns, offset in (([0, 2], x_offset), ([1, 3], y_offset)):
        boxes[:, columns] = boxes[:, columns] - offset
    return boxes
ddb181afcf039285596199625021537b52e30c19
105,366
def jaccard(ann1, ann2):
    """Jaccard distance [1] between two annotations viewed as point sets.

    One minus the "intersection over union" score, in [0, 1]: distance 0
    means perfect overlap (intersection/union = 1), distance 1 means no
    overlap.

    [1]: https://en.wikipedia.org/wiki/Jaccard_index
    """
    set_a = ann1._as_set()
    set_b = ann2._as_set()
    intersection = len(set_a.intersection(set_b))
    union = len(set_a.union(set_b))
    return 1.0 - intersection * 1.0 / union
7ee65eadc31ba98a217182e84843fffa19033a43
105,367
import numbers


def is_numeric_token(token):
    """Tell whether ``token`` is a number.

    Parameters
    ----------
    token : object
        The token to be evaluated.

    Returns
    -------
    bool
        True when the token is an instance of ``numbers.Number``.
    """
    return isinstance(token, numbers.Number)
52d8a4e9f7003520bbbb860da56fb87feae3dea1
105,373
import torch


def collate_fn_rank(samples):
    """Create a batch out of ``samples``.

    Each sample is a list of ``(premise + ending tokens, premise length)``
    pairs (4 per sample, the first holding the correct ending).

    Returns:
        (token tensor, mask tensor); the mask is 1 over ending tokens only
        (premise and padding positions are 0).
    """
    # Longest token list anywhere in the batch; everything pads up to it.
    max_len = max(len(tokens) for sample in samples for tokens, _ in sample)

    def mask_row(tokens, prem_len):
        # 0 over the premise, 1 over the ending, 0 over the padding.
        return [0] * prem_len + [1] * (len(tokens) - prem_len) + [0] * (max_len - len(tokens))

    def pad_row(tokens):
        return tokens + [0] * (max_len - len(tokens))

    x_mask = torch.ByteTensor(
        [[mask_row(tokens, prem_len) for tokens, prem_len in sample] for sample in samples]
    )
    x = torch.LongTensor(
        [[pad_row(tokens) for tokens, _ in sample] for sample in samples]
    )
    return x, x_mask
d1b3842f08c8c2b0c74b2d44d50d23c1d65425ac
105,376
def _get_exc_info(span):
    """Parse a span's status description into (exception type, value).

    The description is expected to look like ``"TypeName: message"``.
    """
    kind, message = span.status.description.split(":", 1)
    return kind, message.strip()
49723932c38527430c5d453ed49ca9b3f5e49caf
105,377
def _isurl(filename):
    """Determine whether ``filename`` is a URL.

    Valid URLs start with 'http://', 'https://' or 'ftp://'.
    """
    # Compare the lower-cased prefix against each recognised scheme.
    for scheme in ('http://', 'https://', 'ftp://'):
        if filename[:len(scheme)].lower() == scheme:
            return True
    return False
d6250182db005732dea189f57c6a827f97feebc8
105,382
import operator


def getattrd(obj, name):
    """Like ``getattr()``, but supporting dotted-path lookup.

    Returns None when any attribute along the path is missing.
    """
    try:
        # attrgetter handles the dot-separated traversal for us.
        return operator.attrgetter(name)(obj)
    except AttributeError:
        return None
7cccb0da46c1bc7c9280ce38ac978bced385822a
105,386
import requests


def download_file(url, local_filename):
    """Stream ``url`` to ``local_filename`` on disk.

    Based on (note the stream=True parameter):
    https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py/16696317#16696317

    :param url: source URL.
    :param local_filename: destination path.
    :return: ``local_filename`` on success, ``None`` on a non-OK response.
    """
    response = requests.get(url, stream=True)
    if not response.ok:
        return None
    with open(local_filename, "wb") as out:
        for chunk in response.iter_content(chunk_size=1024):
            # Skip keep-alive chunks, which arrive as empty bytes.
            if chunk:
                out.write(chunk)
    return local_filename
bd93169826d5f774fa8fd7a46ffcc370f4b0f40b
105,387
import gzip


def get_io_wrapper(file_name, mode=None):
    """Open ``file_name`` for line-by-line reading, transparently using
    gzip for ``.gz`` paths.

    Args:
        file_name (str): Path to the file.
        mode (str): Open mode (usually 'r', 'rt' or 'rb'); defaults to
            'rt' for gzip files and 'r' otherwise.

    Returns:
        _io.TextIOWrapper (or a gzip file object).
    """
    if file_name.strip().endswith('.gz'):
        # gzip defaults to binary mode, so ask for text explicitly.
        return gzip.open(file_name, mode if mode is not None else 'rt')
    return open(file_name, mode if mode is not None else 'r')
67abd178a06290556488896b05a8b2161c4d4cab
105,395
def _get_cfn_parameters(availability_zone, internet_gateway_id, public_cidr, vpc_id, private_cidr=None):
    """Create a CloudFormation-compatible stack parameter list from the
    given variables."""
    values = {
        "AvailabilityZone": availability_zone,
        "InternetGatewayId": internet_gateway_id,
        "PublicCIDR": public_cidr,
        "VpcId": vpc_id,
    }
    parameters = [
        {"ParameterKey": key, "ParameterValue": value} for key, value in values.items()
    ]
    # The private subnet CIDR is optional.
    if private_cidr:
        parameters.append({"ParameterKey": "PrivateCIDR", "ParameterValue": private_cidr})
    return parameters
eb8abcd0fba48b0d01037169856cbe561aae19f3
105,396
def probability_string(probability: float) -> str:
    """Format a probability with two decimals, rewriting a leading "0."
    to "0," (comma decimal separator), e.g. ``0.5`` -> ``"0,50"``.

    NOTE(review): values >= 1 keep the '.' separator — only a literal
    "0." substring is rewritten, preserving the original behaviour.
    """
    # The previous implementation chained .replace("0,0", "0,0"),
    # which replaced a substring with itself — a no-op, now removed.
    return f"{probability:.2f}".replace("0.", "0,")
2489fb558837663c0ceed80a4d3171b8243eaa69
105,398
def get_off_geometry_from_group(group):
    """Get geometry information from an NXoff_geometry group.

    :param group: dictionary holding the NXoff_geometry group under the
        "geometry_group" key.
    :return: (vertices, faces, winding_order) data from the group.
    """
    geometry = group["geometry_group"]
    # [...] pulls the full dataset into memory.
    return (
        geometry["vertices"][...],
        geometry["faces"][...],
        geometry["winding_order"][...],
    )
faf437f1510aac165b71f7721e91baa1bb9d9108
105,400
import shutil


def zip_folder(source_dir_path, zip_path):
    """Zip a folder into a ``.zip`` archive.

    Args:
        source_dir_path (str): folder to archive.
        zip_path (str): destination path; a '.zip' suffix is optional and
            is appended to the returned path either way.

    Returns:
        str: full path of the produced archive (ending in '.zip').
    """
    # shutil.make_archive wants the base name without the extension.
    if zip_path.endswith('.zip'):
        zip_path = zip_path[:-4]
    shutil.make_archive(zip_path, 'zip', source_dir_path)
    return zip_path + '.zip'
e389ad6d858f569efe7681916dbc11d6f288ecb4
105,413
from collections import OrderedDict


def sort_dict(d):
    """Return an OrderedDict like ``d`` but with sorted keys.

    Parameters
    ----------
    d : mapping

    Returns
    -------
    od : collections.OrderedDict
    """
    # Import moved from typing to collections: instantiating
    # typing.OrderedDict at runtime is deprecated. sorted(d.items())
    # orders by key (dict keys are unique, so values are never compared).
    return OrderedDict(sorted(d.items()))
385eecbb433a60f03a3be3cdc5d3c3b8c4e45de8
105,420
def list_iterator(list_argument):
    """Iterate over the indices of a list's elements.

    Parameters
    ----------
    list_argument : list
        A list with elements.

    Returns
    -------
    range
        A range iterating as many times as there are elements.
    """
    element_count = len(list_argument)
    return range(element_count)
a0af7ca3d1d0dd3121a460cb70414f6a224d960c
105,421
import functools
import operator


def prod(xs):
    """Product (as in multiplication) of an iterable.

    Returns 1 (the multiplicative identity) for an empty iterable.
    """
    total = 1
    for factor in xs:
        total = total * factor
    return total
87381e7da37b9164767720f2886d5ce47b8df778
105,431
import logging


def get_pft_vals(key: str, rec: dict) -> tuple:
    """Look up a lung function test (e.g. TLco) in ``rec`` and return a
    4-tuple of (measured, predicted, %predicted, SR).

    Tuple positions are filled with None when the value doesn't exist.
    Use get_spiro_vals() for measures with reversibility.
    """
    if key not in rec:
        logging.error('Lung function, get_pft_vals() - no data found')
        return (None, None, None, None)
    values = rec[key]
    # dict.get() yields None for any missing field.
    return (
        values.get('Measured_pre'),
        values.get('Predicted'),
        values.get('Percent_pred_pre'),
        values.get('SR_pre'),
    )
67b87c242a054d36a8ae0b2b7f600c3ef2702e95
105,433
def get_nop() -> str:
    """Return the NOP marker that represents a docker command."""
    return "/bin/sh -c #(nop)"
3ecd0760b7f77609e742aafe12c1d0642893650c
105,434
def fibonacci(n, p=1, q=-1):
    """Generalized Fibonacci (Lucas) sequence.

    Computed from the recurrence ``F(n) = p*F(n-1) - q*F(n-2)``; with the
    defaults ``p=1, q=-1`` this is the standard Fibonacci sequence
    ``F(n) = F(n-1) + F(n-2)``.

    Args:
        n: position in the sequence of the number to return.
        p: parameter ``p`` of the recurrence.
        q: parameter ``q`` of the recurrence.
    """
    # F(0) = 0 and F(1) = 1 by definition.
    if n in (0, 1):
        return n
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, p * current - q * previous
    return current
9fade6991044b8790208fb4d290891aae460cfbe
105,435
def winner(game):
    """Return the winner of ``game``: 0 for black, 1 for white, -1 for a tie."""
    outcome_codes = {'0-1': 0, '1-0': 1, '1/2-1/2': -1}
    result = game.headers['Result']
    if result in outcome_codes:
        return outcome_codes[result]
    raise Exception("Can't tell who won. Is the header mangled?")
efce1b16da68525382e3fc092f95faa1de7b8903
105,438
def path_macro_sub(s, ip='', dp='', gp='', ei=''):
    """Replace path macros in ``s`` with the supplied values:

    - <INSTALL_DIR> is replaced with ``ip``
    - <DATA_DIR> is replaced with ``dp``
    - <GLOBAL_DATA> is replaced with ``gp``
    - <EVENT_ID> is replaced with ``ei``

    It is not an error if ``s`` contains none of the macros.

    Args:
        s (str): The string into which the replacements are made.
        ip (str): Replacement for <INSTALL_DIR>.
        dp (str): Replacement for <DATA_DIR>.
        gp (str): Replacement for <GLOBAL_DATA>.
        ei (str): Replacement for <EVENT_ID>.

    Returns:
        str: A new string with the substitutions applied.
    """
    substitutions = (
        ('<INSTALL_DIR>', ip),
        ('<DATA_DIR>', dp),
        ('<GLOBAL_DATA>', gp),
        ('<EVENT_ID>', ei),
    )
    for macro, value in substitutions:
        s = s.replace(macro, value)
    return s
6c847345d572859b5f852933d7654ac9a6ae0a10
105,439
def maybe_unsorted(low, high):
    """Tell whether the half-open interval [low, high) holds more than one
    element and so could possibly be unsorted."""
    span = high - low
    return span > 1
f5ad7462f72f73809d47755ff9319e9418acf655
105,444
def is_list_or_tuple(value):
    """Test if a given value is a list or tuple that can be converted
    into multiple arguments.

    Parameters
    ----------
    value: any
        Any object that is tested for being a list or tuple.

    Returns
    -------
    bool
    """
    # Single isinstance call with a tuple of types replaces the chained
    # isinstance checks (idiomatic and marginally faster).
    return isinstance(value, (list, tuple))
11f927a4164cacdf67391f95c24cffa6d2bcebfd
105,445
def color_mark(mark):
    """Wrap ``mark`` in ANSI 256-color escape codes (palette entry 36),
    resetting the color afterwards."""
    return f"\033[38;5;36m{mark}\033[0;0m"
a579225dc513cf8edcff030345db55f2f61d924c
105,450
def to_bikel_format(tagged_sents):
    """Convert tagged sentences to Bikel (bracketed) format.

    Takes multiple sentences where each sentence is a list of
    ``(word, tag)`` tuples.

    Parameters::
        tagged_sents (list(list(tuple(str, str))))

    Return type::
        (str)
    """
    pieces = []
    for sentence in tagged_sents:
        pieces.append("(")
        for word, tag in sentence:
            pieces.append("(" + word + " (" + tag + ")) ")
        pieces.append(") ")
    return "".join(pieces)
d54ce1b29e6472a97768e96cfa02a4705c27e35e
105,454
import json


def format_containers(containers, json_out):
    """Format container data for an Ansible dynamic inventory.

    Args:
        containers: iterable of ``(hostname, metadata)`` pairs.
        json_out: when True return a JSON string, otherwise a dict.

    Returns:
        Container information formatted to Ansible specs.
    """
    inventory = {
        'all': {
            'vars': {'ansible_connection': 'docker'},
            'hosts': [],
            '_meta': {'hostvars': {}},
        }
    }
    for host, metadata in containers:
        # The Ansible docs use a mapping with None values for hosts, but a
        # plain host list is accepted as well.
        inventory['all']['hosts'].append(host)
        if metadata:
            inventory['all']['_meta']['hostvars'][host] = {'docker_metadata': metadata}
    return json.dumps(inventory) if json_out else inventory
cc25ceb34d2029e2f43f78d10f6dce0c4f3074aa
105,459
def get_labels(crf):
    """Return all possible labels of the model, excluding 'O'.

    :param crf:
    :type crf: sklearn_crfsuite.CRF
    :return labels:
    :rtype labels: list
    """
    # Copy first so the estimator's own class list is left untouched.
    label_list = list(crf.classes_)
    label_list.remove('O')
    return label_list
ec85b053573671e10bc0a6cd7820c42d90a1bcad
105,474
def extract(token: str) -> str:
    """Extract the label from a transition token by stripping its first
    and last characters."""
    inner = token[1:-1]
    return inner
3d8ae3ef9f04a562890275ad7c38b9e549ab881a
105,481
def json_serial(obj):
    """JSON serializer for objects not serializable by default json code.

    Any object exposing ``isoformat()`` (dates, datetimes, times) is
    rendered as its ISO-8601 string; everything else raises TypeError.

    >>> from datetime import date, datetime
    >>> json_serial(date(2018, 12, 12))
    '2018-12-12'
    >>> json_serial(datetime(2018, 12, 12, 12, 12))
    '2018-12-12T12:12:00'
    """
    try:
        return obj.isoformat()
    except AttributeError:
        raise TypeError("Type %s not serializable" % type(obj))
801a545cbe2e0ec8d263d57882cd1d8c07ea1707
105,487
def uniq(iterable):
    """Remove duplicate items from ``iterable`` while keeping their order.

    Items are deduplicated by their ``repr()``, so unhashable values are
    supported.

    Params:
        iterable (Iterable<Any>): the iterable to remove duplicates from.

    Returns:
        tuple<Any>: the items without duplicates, in first-seen order.
    """
    seen_reprs = set()
    kept = []
    for item in iterable:
        fingerprint = repr(item)
        if fingerprint not in seen_reprs:
            seen_reprs.add(fingerprint)
            kept.append(item)
    return tuple(kept)
983129a5559a6292708388228af7973bafa37865
105,490
import re


def shortstr(string):
    """Shorten ``string`` by rounding long floats to three decimals.

    :param string: e.g. '#810002 scan eta 74.89533603616637 0.02 pil3_100k'
    :return: shorter string, e.g. '#810002 scan eta 74.895 0.02 pil3_100k'
    """
    def _round_match(m):
        return str(round(float(m.group()), 3))

    # Only floats with 5+ decimal places are rewritten.
    return re.sub(r'\d+\.\d{5,}', _round_match, string)
377a9a5ed8f7d2831eb505cbe72910fd688addcf
105,492
def calculate_age_at(dob, date):
    """Calculate a person's age in whole years at ``date`` from their
    date of birth ``dob``."""
    # Subtract one year if the birthday hasn't occurred yet this year.
    had_birthday = (date.month, date.day) >= (dob.month, dob.day)
    return date.year - dob.year - (0 if had_birthday else 1)
524f199449bd42a594a20856de4a64524a075ec5
105,493
def func_file_input(filepath, comment_char='#', dtype=float, bool_tail=True,
                    in_keyword='PAIR', cut_keyword='MAE', bool_force_cut_kw=False,
                    ignore_kw='HEAD'):
    """Read input file

    Args:
        filepath (str) : file path
        comment_char (str) : char used as comments when reading
        dtype (type) : convert data type
        bool_tail (bool) : whether append tail or not
        in_keyword (str) : read leading words
        cut_keyword (str) : read triming words
        bool_force_cut_kw(bool) : whether force trim words exist
        ignore_kw (str) : ignore keyword when reading

    Returns:
        List[List] : reading data
    """
    profile = []
    with open(filepath, mode='rt') as f:
        while True:
            line = f.readline()
            # readline() returns '' only at end of file.
            if len(line) == 0:
                break
            line = line.strip()
            # Skip blank lines and comment lines.
            if len(line) == 0 or line[0] == comment_char:
                continue
            bo = False  # NOTE(review): unused; looks like leftover scaffolding.
            lp = line.split()
            # Data lines start with the in_keyword followed by fields.
            if len(lp) >= 2 and lp[0] == in_keyword:
                # Optionally require the cut keyword to be present.
                if bool_force_cut_kw and line.find(cut_keyword) == -1:
                    print('Error: in line: ' + line)
                    raise KeyError('cut_keyword is not found')
                ls = []
                try:
                    for tmp in lp[1:]:
                        if tmp != cut_keyword:
                            ls.append(dtype(tmp))
                        else:
                            # Found the cut keyword: optionally keep the single
                            # value that follows it, then stop reading fields.
                            if bool_tail is True:
                                t = lp.index(tmp) + 1
                                if len(lp) <= t:
                                    # Nothing after the keyword: store a placeholder.
                                    ls.append('nan')
                                else:
                                    ls.append(dtype(lp[lp.index(tmp)+1]))
                            break
                    profile.append(ls)
                except ValueError:
                    print('Error: in line: ' + line)
                    raise ValueError('cannot convert')
            elif len(lp) >= 2 and lp[0] == ignore_kw:
                # Header lines are recognised and silently skipped.
                pass
            else:
                print('Error: in line: ' + line)
                raise ValueError('Not correctly defined')
    return profile
d2ab23102efdc52d456e7ff888dab27cf06465d3
105,496
import re


def _split_text_by_delimeter(pattern, text):
    """Split ``text`` into parts separated by a delimiter matching
    ``pattern``; the delimiter itself is not included in the returned
    parts.

    For example, ``pattern='\\n\\n---------\\n\\n'`` may be used if
    chapters are separated by 8 dashes.
    """
    texts = re.split(pattern, text)
    # re.split always returns at least one element, so the previous
    # `len(texts) == 0` check could never fire. A single element means the
    # pattern was not found and no splitting happened.
    if len(texts) == 1:
        print(f'\n❗ No text matching pattern "{pattern}". Splitting is not performed.\n')
    return texts
64a2337b420f8c3c71da26c192963a10ba796a37
105,497
import inspect


def get_caller_stack_name(depth=1):
    """Return the function name at ``depth`` frames above this call.

    :param depth: which stack frame to inspect (1 = direct caller);
        useful for nested wrappers.
    """
    frame_info = inspect.stack()[depth]
    # FrameInfo.function is the named-tuple field at index 3.
    return frame_info.function
14b451a01b069b6e4f2576a7f3082799cd4d5f15
105,508
def diagonal_basis_commutes(pauli_a, pauli_b):
    """Test if ``pauli_a`` and ``pauli_b`` share a diagonal basis.

    Example: Check [A, B] with the constraint that A & B must share a
    one-qubit diagonalizing basis. [sZ(0), sZ(0) * sZ(1)] -> True;
    [sX(5), sZ(4)] -> True; [sX(0), sY(0) * sZ(2)] -> False.

    :param pauli_a: Pauli term to check commutation against ``pauli_b``.
    :param pauli_b: Pauli term to check commutation against ``pauli_a``.
    :return: Boolean of commutation result.
    :rtype: Bool
    """
    shared_qubits = set(pauli_a.get_qubits()) & set(pauli_b.get_qubits())
    for qubit in shared_qubits:
        op_a = pauli_a[qubit]
        op_b = pauli_b[qubit]
        # Conflicting non-identity operators on the same qubit break the
        # shared diagonal basis.
        if op_a != 'I' and op_b != 'I' and op_a != op_b:
            return False
    return True
480fb476a53e820a01593522b02fc1f3fb3bc883
105,510
import inspect


def get_init_params(obj):
    """Return the parameter names of ``obj.__init__``, excluding ``self``
    and ``kwargs``.

    Parameters
    ----------
    obj: Serializable

    Returns
    -------
    list of str
    """
    signature = inspect.signature(obj.__init__)
    # Filtering is equivalent to removing 'self'/'kwargs' when present.
    return [name for name in signature.parameters if name not in ('self', 'kwargs')]
5bbe66f9d4300c4f164f134c3b27e37325b0c1bc
105,515
def color_pval(p_value, alpha=0.05, significant_color='red', null_color='black'):
    """Return a text color based on the significance of a p-value.

    Parameters
    ----------
    p_value : float
        The p-value to check.
    alpha : float, optional, default: 0.05
        The significance level to check against.
    significant_color : str, optional, default: 'red'
        The color for if the p-value is significant.
    null_color : str, optional, default: 'black'
        The color for if the p-value is not significant.

    Returns
    -------
    color : str
        Color value, reflecting the significance of the given p-value.
    """
    if p_value < alpha:
        return significant_color
    return null_color
a07ea6591771f6fab133286984ba4ccb42bdec2b
105,518
def reshape_vertex_map(points):
    """Flatten an image-shaped point cloud to a simple array of points.

    :param points: (H, W, 3) or (N, 3) shape of numpy array.
    :return: (H*W, 3) or (N, 3) shape of numpy array.
    """
    if len(points.shape) == 3:
        # Collapse the spatial dimensions, keeping the channel axis.
        return points.reshape(-1, points.shape[2])
    return points
26f2684c6555d6c69eafdf6c98611b62aad6fda2
105,519
def append_or_extend(array, mixed):
    """Append ``mixed`` to ``array``, or extend ``array`` when ``mixed``
    is itself a list.

    :param array: list to append to or extend (mutated in place)
    :type array: list
    :param mixed: item or list of items
    :type mixed: mixed
    :return: the mutated ``array``
    :rtype: list
    """
    # list.extend()/append() return None, so the old code always returned
    # None despite documenting a list return; return the array itself.
    if isinstance(mixed, list):
        array.extend(mixed)
    else:
        array.append(mixed)
    return array
bb23b09b275c8f1631e8018b58399cdcb9e859a4
105,522
def concatenador(*args, sep='/'):
    """Return a string with the values in ``args`` separated by ``sep``."""
    return sep.join(args)
643275da534d48b3d0b79a50550cb27c0174d2ac
105,523
import asyncio


def isasync(fun):
    """Return True when ``fun`` is a coroutine function (``async def``)."""
    return asyncio.iscoroutinefunction(fun)
2a068f5c548d679cee99a4501d99372da7c55f56
105,527
def is_linkage(ID, direction, pois_df_0, pois_df_1):
    """Check whether the POI with ``ID`` is a link POI.

    :param ID: poi ID (index value in the dataframe).
    :param direction: 0/1, selecting ``pois_df_0`` or ``pois_df_1``.
    :param pois_df_0:
    :param pois_df_1:
    :return: True/False, linkage ID of this POI (which points to
        information in link_gdf), or None when it is not a linkage.
    """
    pois_df = pois_df_0 if direction == 0 else pois_df_1
    row = pois_df[pois_df.index == ID]
    type_id = row.type_ID.to_list()[0]
    # A linkage POI has a non-negative type_ID and the "link" type.
    if type_id >= 0 and row.pois_type.to_list()[0] == "link":
        return True, type_id
    return False, None
8dfac71457f96b5133dd81ad0d1f7be3276e24e0
105,531
def _get_topic_memcache_key(topic_id, version=None):
    """Build the memcache key for a topic.

    Args:
        topic_id: str. ID of the topic.
        version: int. The version of the topic (optional).

    Returns:
        str. 'topic-version:<id>:<version>' when a version is given,
        otherwise 'topic:<id>'.
    """
    if version:
        return f'topic-version:{topic_id}:{version}'
    return f'topic:{topic_id}'
d4c50097dab5ff16c56e26374ce9c3a9ed0517e5
105,537
def _extract_offer_spendings(transcript_group):
    """Extract the spendings per customer.

    Args:
        transcript_group (pandas.DataFrame): The transcript dataset.

    Returns:
        pandas.DataFrame: The modified transcript with ``recommended_offer``
        and ``spendings`` columns added.
    """
    def _offer_for_row(row):
        # Offers only count when the customer actually purchased.
        return 0 if row.purchased == False else row.mapped_offer

    def _spending_for_row(row):
        return row.amount + row.non_offer_amount

    transcript_group["recommended_offer"] = transcript_group.apply(_offer_for_row, axis=1).astype(int)
    transcript_group["spendings"] = transcript_group.apply(_spending_for_row, axis=1)
    return transcript_group
46392f71b5a0885f5ad3dd3c5babd8b3c5bf3655
105,544
def format_distance(distance: float) -> str:
    """Format a distance as a readable string (rounding, conversion):
    to the nearest metre below 1 km and to the nearest kilometre above.

    Parameters:
        distance: The distance to format.

    Returns:
        The formatted distance, e.g. '500m' or '2km'.
    """
    if distance < 1000.0:
        return f"{distance:.0f}m"
    return f"{(distance / 1000.0):.0f}km"
c5ae34d6c8441263a2d9417b88e122de7d80f09c
105,547
def generate_ancestry_path(full_name):
    """Generate ancestry path from full_name.

    Args:
        full_name (str): Full name of the resource, e.g.
            'organization/123/project/456/...'.

    Returns:
        str: Ancestry path built from the supported ancestor segments.
    """
    supported_ancestors = ('organization', 'folder', 'project')
    segments = full_name.split('/')
    parts = []
    # Each ancestor keyword is followed by its ID segment.
    for index in range(len(segments) - 1):
        if segments[index] in supported_ancestors:
            parts.append(segments[index] + '/' + segments[index + 1] + '/')
    return ''.join(parts)
6494810aff6bc784164db40197261cf269760a2e
105,550
import re


def guess_parameters(next_line):
    """Attempt to guess parameters from a parenthesized group of
    identifiers.

    Returns the list of parameter names when ``next_line`` contains a
    group like '(a, b)', otherwise None.
    """
    match = re.search(r'\(([\w\s,]+)\)', next_line)
    if match is None:
        return None
    return [name.strip() for name in match.group(1).split(',')]
9ad257cf096071dc2f916b7520e79c154948e4e8
105,552
def find_span(knots, degree, x):
    """Determine the knot span index at location ``x``, given the
    B-Splines' knot sequence and polynomial degree. See Algorithm A2.1
    in [1].

    For a degree p, the knot span index i identifies the indices [i-p:i]
    of all p+1 non-zero basis functions at a given location x.

    Parameters
    ----------
    knots : array_like
        Knots sequence.
    degree : int
        Polynomial degree of B-splines.
    x : float
        Location of interest.

    Returns
    -------
    span : int
        Knot span index.
    """
    # Knot indices at the left/right boundary of the domain.
    left = degree
    right = len(knots) - 1 - degree

    # Points exactly on (or outside) a boundary get the boundary span.
    if x <= knots[left]:
        return left
    if x >= knots[right]:
        return right - 1

    # Binary search for the interval [knots[span], knots[span+1]) holding x.
    span = (left + right) // 2
    while x < knots[span] or x >= knots[span + 1]:
        if x < knots[span]:
            right = span
        else:
            left = span
        span = (left + right) // 2
    return span
b14decbbd049e8f7d647f8cde5142f2d5f3ece1a
105,553
def mock_get_full_name(request):
    """Return a hard-coded full name, for demo purposes only."""
    return 'James Student'
6dfe5dbf46afe3792be0fde9b8e567e0cc3f92c1
105,555
def has_import(ast, name):
    """Determine if the given module name is in the code.

    Args:
        ast (:py:class:`pedal.cait.cait_node.CaitNode`): The starting
            point to search.
        name (str): The name of the module to match against.

    Returns:
        bool: Whether or not the module name was matched.
    """
    # ``import <name>``
    for statement in ast.find_all("Import"):
        for alias in statement.names:
            if alias._name == name:
                return True
    # ``from <name> import *``
    for statement in ast.find_all("ImportFrom"):
        if statement.module == name:
            return True
    return False
eaf6379f9e2329c7e8d1064ef6d90bb23816bf43
105,557
import re
import string

# Precompiled once at import time: one or more consecutive punctuation
# characters (previously recompiled on every call).
_RE_PUNCT = re.compile(r'([%s])+' % re.escape(string.punctuation), re.UNICODE)


def strip_punctuation(s):
    """Replace each run of punctuation characters in ``s`` with a single
    space."""
    return _RE_PUNCT.sub(" ", s)
185b756961f5f2ecbac577f5cc60c0755856aa2e
105,561
def discretise(scores, threshold: float = 0.5):
    """Discretise an array of label scores in [0, 1] into discrete
    predictions in {0, 1}, by selecting all labels that score at least
    ``threshold``.

    Parameters
    ----------
    scores : array shape=(n, n_classes, )
        Array of class scores; scores should be values in [0, 1].
    threshold : float=0.5
        Threshold to use for selecting labels.

    Returns
    -------
    predictions : array shape=(n, )
        Elementwise ``scores >= threshold``.
    """
    predictions = scores >= threshold
    return predictions
e86064cf07ee90a44477d078ecd48d77fc38e5ed
105,567
def mapper(chunk):
    """The mapper function: process the raw text and return name-value
    pairs.

    Args:
        - chunk (str): raw text from the data file; each valid line holds
          zip_code,latitude,longitude,city,state,country.

    Return (list of tuples): a list of 2D tuples with ``(country, 1)``
    pairs.
    """
    pairs = []
    for line in chunk.split('\n'):
        fields = line.strip().split(",")
        # Only complete 6-field records are counted.
        if len(fields) == 6:
            country = fields[5]
            pairs.append((str(country), 1))
    return pairs
9b751d0b7ea0349f52e33665a0d5c37402a5f857
105,569
def folder_exists(course, folder_name):
    """Test whether a folder exists for a course.

    The folder name is everything following the 'files/folder/' in the
    folder's URL: 'folder_name' or 'folder1/folder2/folder_name' for
    nested folders. Canvas prefixes stored names with 'course files/'.
    """
    target = 'course files/' + folder_name
    return any(folder.full_name == target for folder in course.get_folders())
175682727ba5ef90dc4a3b210acdbeb6db0fd6f8
105,570
def histogramm(values):
    """Compute the histogram of all values: a dictionary dict[v]=count."""
    counts = {}
    for value in values:
        counts[value] = counts.get(value, 0) + 1
    return counts
8ca7e71a8a41f9b013227e839d91a8bd0b1ab07f
105,571
def timeshift(df, threshold, criterion="Infected", time="time"):
    """Add a 'time_adj' column, zeroed on a threshold number of infections.

    Arguments:
        df -- A Pandas dataframe in the format output by the SIR function.
        threshold -- an integer number of infections to be set to time zero.

    Keyword arguments:
        criterion -- the column in df containing the criterion to which
            the threshold applies (default "Infected").
        time -- the column in df containing the time variable to adjust
            (default "time").
    """
    # Earliest time at which the criterion reaches the threshold
    # (local renamed so it no longer shadows the function name).
    zero_point = df[df[criterion] >= threshold][time].min()
    df["time_adj"] = df[time] - zero_point
    return df
967e73ac7284db5880c97c5146e0b4b5aecb306f
105,575
def NormalCamel(name):
    """Convert C++ ASTClassName into normalized AstClassName.

    A few legacy classes have irregular names requiring special-casing in
    order to be remapped to an ASTNodeKind.

    Args:
        name: name of the C++ class.

    Returns:
        Normalized camel-case equivalent of the class name.
    """
    special_cases = {
        'ASTBigNumericLiteral': 'AstBignumericLiteral',
        'ASTJSONLiteral': 'AstJsonLiteral',
        'ASTTVFSchema': 'AstTvfSchema',
        'ASTTVF': 'AstTvf',
        'ASTTVFArgument': 'AstTvfArgument',
        'ASTTVFSchemaColumn': 'AstTvfSchemaColumn',
    }
    if name in special_cases:
        return special_cases[name]
    return name.replace('AST', 'Ast')
f22d4942cdd33499463ef5d55adf50c55b87201b
105,578
def default_sample_function(famid, indid):
    """The default function for turning a two-part
    :attr:`pysnptools.distreader.DistReader.iid` into a Bgen sample.

    If the iid's first part (the family id) is '0' or '', the sample is
    the iid's 2nd part. Otherwise, the sample is 'FAMID,INDID'.

    >>> default_sample_function('fam0','ind0')
    'fam0,ind0'
    >>> default_sample_function('0','ind0')
    'ind0'
    """
    if famid in ("0", ""):
        return indid
    return famid + "," + indid
e1d250f6705ad5ca85432c336270dd33fc39d566
105,579
def get_partial_dict(prefix, dictionary, container_type=dict, ignore_missing=False, pop_keys=False):
    """Given a dictionary and a prefix, return a container with just the items
    whose keys start with ``prefix + "."``, with that prefix stripped::

        get_partial_dict('prefix', {'prefix.xyz':1, 'prefix.zyx':2, 'xy':3})

    would return::

        {'xyz':1,'zyx':2}

    :param prefix: key prefix to select (a '.' is appended automatically)
    :param dictionary: source mapping
    :param container_type: type used to build the result (default: dict)
    :param ignore_missing: if True, return an empty container instead of
        raising when nothing matches
    :param pop_keys: if True, remove the matched keys from *dictionary*
    :raises AttributeError: when nothing matches and ignore_missing is False
    """
    match = prefix + "."
    n = len(match)
    new_dict = container_type(
        (key[n:], dictionary[key]) for key in dictionary if key.startswith(match)
    )
    if pop_keys:
        # Iterate over a snapshot of the keys since we mutate the dict.
        for key in list(dictionary.keys()):
            if key.startswith(match):
                dictionary.pop(key, None)
    if new_dict:
        return new_dict
    if ignore_missing:
        # Return the (empty) container_type instance rather than a bare {},
        # so callers always get the container type they asked for.
        return new_dict
    raise AttributeError(prefix)
6cdf5ad9254c3e879942f8d91506dbb93390ef22
105,581
import dataclasses import copy def _unpack_field(field: dataclasses.Field) -> dataclasses.Field: """Extract the type contained inside the generic definition. Replaces the field's type with the type contained inside its generic type, e.g.: List[int] -> int The returned field is a copy of the original, as the original resides in the class definition. Modifying it would be destructive. """ new_field = copy.copy(field) new_field.type = field.type.__args__[0] return new_field
50dfe69046d15de79da2364372a4bf9dee4f2c4c
105,582
def _area(bounds): """ Given bounds of the form [[x_min, x_max],[y_min, y_max]] return the area of the described rectangle. """ return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])
9b43768781828a4ddbd0ef99d84ef8d80a7ddde4
105,583
def get_orig(term):
    """
    Follow the chain of 'orig' attributes and return the first object that
    does not have one.
    """
    current = term
    while hasattr(current, 'orig'):
        current = current.orig
    return current
8f16db416cac14d937f89c710ac6ad849adca467
105,584
from typing import Dict
from typing import Tuple


def expectation_from_counts(counts: Dict[Tuple[int, ...], int]) -> float:
    """Estimates the expectation value of a circuit from shot counts.

    Each readout contributes +1 when it contains an even number of '1' bits
    and -1 when odd; the result is the count-weighted average of these
    parities.

    :param counts: Counts of each measurement outcome observed.
    :type counts: Dict[Tuple[int, ...], int]
    :return: The expectation value in the range [-1, 1].
    :rtype: float
    """
    total_shots = sum(counts.values())
    odd_parity_shots = sum(
        count for outcome, count in counts.items() if sum(outcome) % 2
    )
    return -2 * odd_parity_shots / total_shots + 1
b0502dbfd0517b7a5ecbacf549007cdc572ee40e
105,595
def _split_actors(actors_with_transforms): """Splits the retrieved actors by type id""" vehicles = [] traffic_lights = [] speed_limits = [] walkers = [] for actor_with_transform in actors_with_transforms: actor = actor_with_transform[0] if 'vehicle' in actor.type_id: vehicles.append(actor_with_transform) elif 'traffic_light' in actor.type_id: traffic_lights.append(actor_with_transform) elif 'speed_limit' in actor.type_id: speed_limits.append(actor_with_transform) elif 'walker.pedestrian' in actor.type_id: walkers.append(actor_with_transform) return (vehicles, traffic_lights, speed_limits, walkers)
1f3819a9028db1f1e6dc906628055f7a2a64ee7f
105,596
def sort_schedule_setpoint_items(items):
    """
    Sort the items of a schedule/setpoint for execution.

    The ordering matters especially when items overlap (which should not
    happen, but might): we then start with the earlier item and switch to
    the later one once its time has come. Items with from_timestamp = None
    are executed at the very beginning, as anything with an explicit start
    is meant to begin later. Among several such items, one that also has
    to_timestamp = None is placed last, since that is the one expected to
    remain in effect.
    """
    def ordering_key(entry):
        # Booleans sort False < True. When two entries tie on both leading
        # booleans with from_timestamp = None, the tuples compare equal and
        # the None values are never ordered against each other.
        return (
            entry["from_timestamp"] is not None,
            entry["to_timestamp"] is None,
            entry["from_timestamp"],
        )

    return sorted(items, key=ordering_key)
d343a918431b5341eec205fb547f5352110c6666
105,597
def _GetFloatStringPrecision(floatString): """ Gets the floating point precision specified by floatString. floatString can either contain an actual float in string form, or it can be a frame placeholder. We simply split the string on the dot (.) and return the length of the part after the dot, if any. If there is no dot in the string, a precision of zero is assumed. """ floatPrecision = 0 if not floatString: return floatPrecision floatStringParts = floatString.split('.') if len(floatStringParts) > 1: floatPrecision = len(floatStringParts[1]) return floatPrecision
4762f8ea6f2aed10dfeda8e38789e033ead7076b
105,603
def get_role_part(rawsource):
    """Helper function to get the role name part of an instruction,
    including the first backtick.

    Args:
        rawsource: Role raw text.

    Example: :role:`text` -> :role:`
    """
    first_backtick = rawsource.find('`')
    return rawsource[: first_backtick + 1]
3e1c46ef3e8b2e58782b10ec59cdc447bacc76a4
105,607
import re


def match_phone(number):
    """Check whether *number* is a mobile phone number: exactly 11 digits
    starting with 1. Returns True on a match, False otherwise."""
    return bool(re.match(r'^1\d{10}$', number))
299150d4cb26bd1e302d0ee9aa5ce2d990e3fcfe
105,610
def legendre(a, m):
    """
    This function returns the Legendre symbol (a/m).
    If m is an odd composite then this is the Jacobi symbol.

    Implemented with the binary Jacobi algorithm: factors of 2 are stripped
    from `a` using the second supplement ((2/m) = -1 iff m % 8 in {3, 5}),
    and each swap of `a` and `m` applies quadratic reciprocity (sign flips
    iff both are congruent to 3 mod 4). Returns 0 when the symbol is
    undefined, i.e. gcd(a, m) != 1.
    """
    a = a % m
    symbol = 1
    while a != 0:
        while a & 1 == 0:
            # Strip one factor of 2; (2/m) = -1 when m % 8 is 3 or 5
            # (m & 7 is m % 8).
            a >>= 1
            if m & 7 == 3 or m & 7 == 5:
                symbol = -symbol
        # Quadratic reciprocity on the swap: flip the sign when both
        # operands are 3 mod 4 (x & 3 is x % 4).
        a, m = m, a
        if a & 3 == 3 and m & 3 == 3:
            symbol = -symbol
        a = a % m
    # The loop is Euclid's algorithm in disguise; m now holds gcd(a, m).
    # The symbol is nonzero only when that gcd is 1.
    if m == 1:
        return symbol
    return 0
45ee0938fafab5c32e8267b087d09459300229b7
105,611
def get_results_parameters(config: dict) -> tuple:
    """Returns recipe parameters after sanity check

    :param dict config: parameters defined in the recipe settings

    :raises: :class:`ValueError`: Missing parameters

    :returns: (reference_column, group_column, conversion_column)
    :rtype: tuple
    """
    # Validation order matches the reporting priority: reference first,
    # then group, then conversion.
    required = (
        ("user_reference", "User reference column is missing"),
        ("group_column", "Group column is missing"),
        ("conversion_column", "Conversion column is missing"),
    )
    values = []
    for key, error_message in required:
        value = config.get(key, None)
        if not value:
            raise ValueError(error_message)
        values.append(value)
    return tuple(values)
5d0ab4c82517928e8998377283a8697ef1c27e3c
105,613
def cvt_kdd_to_gname(kdd):
    """
    Given a KDD name, convert it into a gcloud-compatible one: gcloud
    expects lower-case letters and dashes instead of underscores.

    Parameters
    ------------
    kdd: string
        KDD name

    returns: string
        name suited for gcloud
    """
    return kdd.lower().replace('_', '-')
7e7957ba156da2739130eb05e8fae84792ba329d
105,615
def datetime_to_seconds(value):
    """
    Converts the given time-of-day value to seconds since midnight,
    including the fractional microsecond part.
    """
    whole_seconds = (value.hour * 60 + value.minute) * 60 + value.second
    return whole_seconds + value.microsecond / 1000000.0
874c48eb0805a0dad16ba707b7872abc19398f4b
105,616
def observed_name(name):
    """Return `_name_observed`."""
    return f"_{name}_observed"
d437eec1aefd75455c28d062ec8c969179a993ca
105,627
def _get_index_and_colors(values, objects, predicate, color_mapper): """ Get index and RGB colors from a color map using a rule. The predicate acts on a tuple of (value, object). Arguments: values (Iterable): floats representing a color. objects (Iterable): objects associated to the colors. predicate (Callable): a predicate to filter objects. color_mapper (cm.ScalarMappable): a mapper from floats to RGBA. Returns: Iterables of indices and RGBA colors. """ indices = [] colors = {} for index, value in enumerate( map( lambda t: t[0], filter( lambda t: predicate(t), zip(values, objects) ) ) ): indices.append(index) colors[index] = color_mapper.to_rgba(value) return indices, colors
69388e92783d200765598b06651da1121561f98c
105,629
import math
def sqrt(x: float) -> float:
    """
    Returns the square root of x.

    Thin wrapper around :func:`math.sqrt`; like it, raises ValueError for
    negative input.
    """
    return math.sqrt(x)
8bc68e33254080b5d58acc0f6f53548cb90ee47e
105,633
def card_ranks(hand: list) -> list:
    """
    Возвращает список рангов (его числовой эквивалент),
    отсортированный от большего к меньшему.

    Returns a list of numerical equivalents of the card ranks, in
    descending order; cards whose rank character is "?" are skipped.

    :param hand: a list of 7 cards, like: ['6C', '7C', '8C', '9C', 'TC', '5C', 'JS'].
    """
    # Rank 0 corresponds to '2', rank 12 to 'A'.
    order = "23456789TJQKA"
    rank_of = {symbol: value for value, symbol in enumerate(order)}
    return sorted(
        (rank_of[card[0]] for card in hand if card[0] != "?"), reverse=True
    )
d42096802de94c8d2092b31dfcb78c41c4c92bb6
105,638
def V(G):
    """
    Returns the set of vertices of a graph.

    Parameters
    ----------
    G = A networkx graph.

    Returns
    -------
    set of vertices belonging to graph 'G'
    """
    return {node for node in G.nodes()}
7fbd9d54486773683b7e1a92371d3ef805356838
105,641
import requests
from bs4 import BeautifulSoup


def get_request(url_to_connect):
    """
    Send a GET request to the server and parse the response body.

    :param url_to_connect: (string) a link to connect to a server
    :return: a BeautifulSoup object built from the response HTML
    """
    response = requests.get(url_to_connect)
    return BeautifulSoup(response.text, 'html.parser')
2d496b646d277dda4beed9aa87981369b7c677e5
105,642
import pathlib


def make_file_name(directory, folder, extension):
    """Build a legal file path for *folder* under *directory*.

    The characters "/", "*", "." and " " in *folder* are replaced with "_";
    *extension*, when non-empty, is appended after a dot. Returns a Path.
    """
    safe_folder = folder.translate(str.maketrans("/*. ", "____"))
    suffix = "." + extension if extension else ""
    return pathlib.Path(directory) / (safe_folder + suffix)
95ed327c601fa7bc88500dd6686f03533643fdb6
105,653
def get_result_string(guess, solution, output):
    """
    Returns the result string for the given guess.

    For each character in the string, "-" means the letter is not in the
    word, "X" means the letter is in the word but the position is wrong,
    and "O" means the letter is in the word and the position is correct.

    Side effect: increments the guess counter stored in output[0].
    """
    marks = []
    for position, letter in enumerate(guess):
        if solution[position] == letter:
            marks.append("O")
        elif letter in solution:
            marks.append("X")
        else:
            marks.append("-")
    output[0] += 1
    return "".join(marks)
300b38a26eb91d4563aa8a3d3a2fadc6a6efc28f
105,655
def _get_data(conn, sql_class, columns_to_select, column_to_value_map): """ Get first available row based on a list of column_name:value pairs. Return the data as a dictionary. """ columns = ', '.join(columns_to_select) column_to_value_clause = 'AND '.join( ['%s = $$%s$$' % (k, v) for k, v in column_to_value_map.items()] ) query = 'SELECT {columns} FROM {table_name} WHERE {column_to_value_clause};'.format( columns=columns, table_name=sql_class.__tablename__, column_to_value_clause=column_to_value_clause) results = conn.execute(query) data = results.first() if data: return dict(data) return {}
2575821b4991dfa7dcac1ad09d20cc7963d31b47
105,657
def enum_constant(context, builder, ty, pyval):
    """
    Return a LLVM constant representing enum member *pyval*.
    """
    # The enum member is lowered as a constant of its underlying data type:
    # ty.dtype is the member-value type and pyval.value its Python payload.
    # NOTE(review): context/builder look like a numba lowering context and an
    # llvmlite IR builder -- confirm against the constant-registration site.
    return context.get_constant_generic(builder, ty.dtype, pyval.value)
d4e4e2d65d193d2032e9e7875e6d9a0f15ad1b80
105,662
def remove_prefix_0x(s: str) -> str:
    """Remove a leading '0x' or '0X' from *s*.

    :param s: str
    :return: str, *s* without its '0x'/'0X' prefix (unchanged if absent)
    """
    if s.startswith(('0x', '0X')):
        return s[2:]
    return s
f180bfb61cefcd13d743a86f605c2acfcfe64e78
105,665
def _FixNtfsMode(mode: int) -> int: """Fix NTFS permissions returned by TSK.""" # TSK with NTFS reports the following permissions: # r-xr-xr-x for hidden files # -wx-wx-wx for read-only files # We want to report the reversed mapping, because it makes more sense. permissions = mode & 0o777 if permissions == 0o333: return (mode & ~0o777) | 0o555 elif permissions == 0o555: return (mode & ~0o777) | 0o333 else: return mode
4bc9965ca9772df0367cbc95bf5f55d27a355023
105,668
def calculate_intensity_group(hfi: float) -> int:
    """
    Returns a 1-5 integer value indicating Intensity Group based on HFI.

    Intensity groupings are:

    HFI         IG
    0-499       1
    500-999     2
    1000-1999   3
    2000-3999   4
    4000+       5
    """
    for group, upper_bound in enumerate((500, 1000, 2000, 4000), start=1):
        if hfi < upper_bound:
            return group
    return 5
1a1ce1572586f840ce9c3d11dcf5438d51d6aa8b
105,673
def parameterized(dec):
    """
    Meta decorator. Decorate a decorator that accepts the decorated function
    as first argument, and then other arguments, with this decorator, to be
    able to pass it arguments.

    Source: http://stackoverflow.com/a/26151604

    >>> @parameterized
    ... def multiply(f, n):
    ...     def dec(*args, **kwargs):
    ...         return f(*args, **kwargs) * n
    ...     return dec

    >>> @multiply(5)
    ... def add(a, b):
    ...     return a + b

    >>> add(3, 2)
    25
    """
    def argument_collector(*dec_args, **dec_kwargs):
        # Captures the decorator arguments; the returned callable then
        # receives the function being decorated.
        def bind(func):
            return dec(func, *dec_args, **dec_kwargs)
        return bind
    return argument_collector
9200eaec3ecf2405fc7e3bca0cce187659af4540
105,674
def open_file_read(filename):
    """Return the full contents of *filename* as a string.

    Returns False (not an exception) when the file does not exist, so
    callers can simply test the result for truthiness.
    """
    try:
        # The 'with' block closes the handle on exit; the explicit close()
        # that used to follow the return statement was unreachable dead code.
        with open(str(filename), "r") as open_file:
            return open_file.read()
    except FileNotFoundError:
        return False
6ef5be8acadf6d7feda6b8e99578bee1e7b9b75c
105,677
def last(seq):
    """Return the last element of *seq* (i.e. ``seq[-1]``).

    Raises IndexError on an empty sequence, per normal indexing semantics.
    """
    return seq[-1]
763d99612f40287225eedf012c3ed143bf9d016a
105,683
def getRelevantWebhooks(webhooks, object_type):
    """
    Given a list of webhooks and an object_type value, return the webhooks
    whose object_type list (item index 4) contains object_type, or whose
    object_type list is empty (i.e. the hook applies to everything).
    """
    # wh[4] is the list of object_types a webhook subscribes to.
    return [wh for wh in webhooks if not wh[4] or object_type in wh[4]]
ae90fcc63e1855f9b5e104e64c7970d422b9c06d
105,684
def _nearby_pages(items): """ Get a list of pages to display for pagination, and None values for continuation dots. Shows up to 12 values, always shows the fist and last two elements and the two elements left and right of the current one. """ if items.paginator.num_pages <= 10: return list(range(1, items.paginator.num_pages + 1)) if items.number <= 6: return list(range(1, 9)) + [None, items.paginator.num_pages, items.paginator.num_pages + 1] if items.number >= items.paginator.num_pages - 6: return [1, 2, None] + list(range(items.paginator.num_pages - 8, items.paginator.num_pages + 1)) return [1, 2, None] + list(range(items.number - 2, items.number + 3)) + [None, items.paginator.num_pages, items.paginator.num_pages + 1]
058a5c1ab545841a8a6065b3822a980c5372ee8d
105,687
import torch


def apply_2d_rotation(input_tensor, rotation):
    """Apply a 2d rotation of 0, 90, 180, or 270 degrees to a tensor.

    The spatial dimensions are assumed to be the last two dimensions, e.g.
    for a 4D tensor the height dimension is the 3rd one and the width
    dimension the 4th.
    """
    assert input_tensor.dim() >= 2
    height_dim = input_tensor.dim() - 2
    width_dim = height_dim + 1

    def flip_vertical(x):
        return torch.flip(x, dims=(height_dim,))

    def flip_horizontal(x):
        return torch.flip(x, dims=(width_dim,))

    def transpose_hw(x):
        return torch.transpose(x, height_dim, width_dim)

    if rotation == 0:
        return input_tensor
    if rotation == 90:
        return flip_vertical(transpose_hw(input_tensor))
    if rotation == 180:
        return flip_horizontal(flip_vertical(input_tensor))
    if rotation == 270:  # equivalently, -90 degrees
        return transpose_hw(flip_vertical(input_tensor))
    raise ValueError(
        "rotation should be 0, 90, 180, or 270 degrees; input value {}".format(rotation)
    )
58c0e9e12320d2d508fd4dc0a89cb05b8c9d62ff
105,688
import re


def parse_registry_path(
    rpstring,
    defaults=(
        ("protocol", None),
        ("namespace", None),
        ("item", None),
        ("subitem", None),
        ("tag", None),
    ),
):
    """
    Parse a 'registry path' string into components.

    A registry path is a string that is kind of like a URL, providing a
    unique identifier for a particular asset, like
    protocol::namespace/item.subitem:tag. You can use the `defaults`
    argument to change the names of the entries in the return dict, and to
    provide defaults in case of missing values.

    :param str rpstring: string to parse
    :param defaults: a sequence of 5 (name, default) tuples naming the five
        path components and the value to use when one is absent (an
        immutable tuple by default -- avoids the mutable-default pitfall)
    :return dict: dict with one element for each parsed entry in the path,
        or None if rpstring is not a valid registry path
    """
    # Matches strings like:
    #   protocol://namespace/item.subitem:tag
    #   protocol::namespace/item:tag
    # Every component except the item is optional, so e.g.
    #   ucsc://hg38/bowtie2_index
    #   hg38/bowtie2_index
    #   bowtie2_index
    # all parse, with the missing components falling back to their defaults.
    # Raw string: the pattern relies on backslash escapes such as \/ and \.
    regex = r"^(?:([0-9a-zA-Z_-]+)(?:::|:\/\/))?(?:([0-9a-zA-Z_-]+)\/)?([0-9a-zA-Z_-]+)(?:\.([0-9a-zA-Z_-]+))?(?::([0-9a-zA-Z_.,|+()-]+))?$"
    res = re.match(regex, rpstring)
    if not res:
        return None
    # Group order matches defaults: protocol, namespace, item, subitem, tag.
    captures = res.groups()
    return {
        name: value or fallback
        for (name, fallback), value in zip(defaults, captures)
    }
5cf4362cc0eff1c32694b3f9babdac11e18cec75
105,689