content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os


def dstat(infile):
    """Return the ``os.stat()`` result for *infile* as a plain dict.

    Keys mirror the ``st_*`` attribute names with the ``st_`` prefix
    stripped (dev, ino, mode, nlink, uid, gid, rdev, size, atime, mtime,
    ctime, blksize, blocks).
    """
    st = os.stat(infile)
    fields = (
        'dev', 'ino', 'mode', 'nlink', 'uid', 'gid', 'rdev',
        'size', 'atime', 'mtime', 'ctime', 'blksize', 'blocks',
    )
    return {name: getattr(st, 'st_' + name) for name in fields}
36779bd505a154fd63cb19724f99652d4e2457ef
14,207
def xlate(vname):
    """Translate a raw wind-speed variable name to its height-suffixed form."""
    # Only these three variables get the "ht" suffix appended.
    special = {"ws_10m_nw", "ws_40m_nw", "ws_120m_nw"}
    return vname + "ht" if vname in special else vname
8759031dcc47f5b34128f6b621f837e92e6c9b23
14,208
def reverse_spline(tck):
    """Reverse the direction of a spline (parametric or nonparametric),
    without changing the range of the t (parametric) or x (nonparametric)
    values."""
    knots, coeffs, degree = tck
    # Flip the knot vector about its endpoint so the range is preserved.
    flipped_knots = knots[-1] - knots[::-1]
    flipped_coeffs = coeffs[::-1]
    return flipped_knots, flipped_coeffs, degree
77c239fa8360657ed4ad4c1eb8d18493c97a84a1
14,209
import importlib
import pkgutil


def list_submodules(package, recursive=True):
    """Find the submodules of a package, optionally recursing.

    Args:
        package (str or module): root package, or its dotted name.
        recursive (bool, optional): descend into subpackages. Defaults to True.

    Returns:
        list: importable dotted module paths.
    """
    if isinstance(package, str):
        package = importlib.import_module(package)
    found = []
    for _loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
        dotted = package.__name__ + '.' + name
        found.append(dotted)
        if recursive and is_pkg:
            found.extend(list_submodules(dotted))
    return found
a5d69affc4cd19bd3e7c67c2d006c224bbafe80d
14,210
def trainee_already_marked_training_date(trainee, training_date):
    """Check whether trainee already marked the given training date.

    A trainee that has training-day info for the date is considered to
    have already answered for it.

    Args:
        trainee(models.Trainee): trainee instance to check.
        training_date(datetime.date): training date to check.

    Returns:
        bool. True if trainee already marked the given date, otherwise False.
    """
    info = trainee.get_training_info(training_date=training_date)
    return bool(info)
138006be069201923017e0277ffec6a7c06080b4
14,211
import os
import sys


def __validate_directory__(paths):
    """Validate that each path is a directory containing its marker file.

    :param paths: dict mapping directory path -> marker filename (falsy
        marker means only the directory itself is checked)
    :return: True when every entry validates, False at the first failure
        (an error is printed to stderr)
    """
    for path, file in paths.items():
        is_dir = os.path.isdir(path)
        marker_ok = not file or os.path.isfile(os.path.join(path, file))
        if is_dir and marker_ok:
            continue
        print("'{}' is not a valid workspace or project directory"
              .format(path), file=sys.stderr)
        return False
    return True
0986099a31c76992187bdcfca8e9d26605483806
14,212
def add_one(x: int) -> int:
    """Return the input value plus one.

    :param x: value greater than or equal to 0
    :return: x + 1
    :except x less than 0:
    """
    # Precondition guard kept as in the original contract.
    assert x >= 0
    incremented = x + 1
    return incremented
eaf60652a9dc210b3f4174c18873be1973c4f99e
14,213
def calculate_individual_geo_mean(lines, index, geo_mean, num):
    """Accumulate the geometric-mean product for one column of parse results.

    Numeric, positive values multiply into ``geo_mean`` and bump ``num``;
    a non-numeric value is remembered in ``previous_value`` (set to "-1"
    when two different non-numeric values are seen).
    """
    previous_value = None
    for raw in lines:
        fields = raw.split("\t")[4:]
        value = fields[index]
        try:
            if float(value) > 0:
                geo_mean *= float(value)
                num += 1
        except ValueError:
            if not previous_value:
                previous_value = value
            elif value != previous_value:
                previous_value = "-1"
    return geo_mean, num, previous_value
765b03379d11d2622d07086e1858ee51807bc293
14,215
def sort_hsvs(hsv_list):
    """Sort HSV tuples by hue, then saturation, then value.

    :param hsv_list: List of HSV tuples
    :return: List of indexes into hsv_list, in sorted order
    """
    # sorted() is stable, so ties keep their original index order,
    # matching a decorate-sort-undecorate over (index, h, s, v).
    return sorted(
        range(len(hsv_list)),
        key=lambda i: (hsv_list[i][0], hsv_list[i][1], hsv_list[i][2]),
    )
49a0936a04156ef1d785dea7efc95d5fffb368e0
14,218
def GetCcIds(issue):
    """Get the Cc's of an issue, whether they are explicit or derived."""
    explicit = issue.cc_ids
    derived = issue.derived_cc_ids
    return explicit + derived
c5d558c4bfac4da5e501bc834e6218a44c49fbd9
14,219
def cleanupFilename( fname ):
    """: Make the filename usable.

    = INPUT VARIABLES
    - fname   Given a filename, clean-it up to make sure we can use it
              with the file system.

    = RETURN VALUE
    - Returns a cleaned up form of the input file name.
    """
    # Single-pass translation table; replacement strings contain no
    # characters that are themselves replaced, so this is equivalent to
    # the chained .replace() calls.
    table = str.maketrans({
        ' ': '_', '/': '_', '\\': '_', '!': '_', '*': '_',
        '`': '_', "'": '_', '"': '_',
        '{': '(', '}': ')',
        '&': '_and_',
    })
    return fname.translate(table)
25f727172ed9438c49510fd6703d2d56c61f85f2
14,220
import json
import ast


def parseJsonDatum(js):
    """Parse a JSON-ish string into a Python object.

    Tries ``json.loads`` first, then ``ast.literal_eval`` (handles
    Python-literal style strings such as single-quoted dicts). Returns
    the raw string unchanged when both fail, the input itself when it is
    already a dict, and None for non-string, non-dict input.

    :param js: a JSON string (or dict, passed through)
    :return: parsed object, the raw string, or None
    """
    if isinstance(js, dict):
        return js
    if not isinstance(js, str):
        return None
    try:
        res = json.loads(js)
    except ValueError:
        # Narrowed from bare except: json.loads raises ValueError
        # (JSONDecodeError); anything else should propagate.
        try:
            res = ast.literal_eval(js)
        except (ValueError, SyntaxError):
            # literal_eval signals malformed input with these; fall back
            # to returning the raw string as before.
            res = js
    return res
29d334038b9275fc822576f9670966768d02060e
14,221
def continue_or_stop(question):
    """Prompt until the reply starts with 'y' (True) or 'n' (False).

    Only the first character of the lowercased, stripped reply matters.
    """
    while True:
        reply = str(input(question + " (y/n): ")).lower().strip()
        first = reply[:1]
        if first == 'y':
            return True
        if first == 'n':
            return False
        print("ERROR: Not a proper prompt")
3447a3b205e71270ccede9fa5608c823afa10062
14,222
import os


def data_dir_check(dir_list, woid, date):
    """Create and return transfer directory if 'model' found in dir path."""
    # return NA if no transfer dir found
    transfer_dir = 'NA'
    # iterate over data dirs
    for directory in dir_list:
        # if model found, create transfer dir and return path
        if os.path.isdir(directory) and 'model' in directory:
            dir_path_items = directory.split('/')
            for no, d in enumerate(dir_path_items):
                if 'model' in d:
                    # Truncate the path just past the first 'model'
                    # component and build the transfer dir beneath it.
                    model_directory = '/'.join(dir_path_items[:no + 1]) + '/'
                    transfer_dir = os.path.join(
                        model_directory,
                        'data_transfer/{}_{}/'.format(woid, date))
                    # Pre-existing transfer dir is treated as a failure.
                    if os.path.isdir(transfer_dir):
                        print('Transfer Directory already exists: {}'.format(transfer_dir))
                        return 'NA'
                    if os.path.isdir(model_directory) and not os.path.isdir(transfer_dir):
                        try:
                            os.mkdir(transfer_dir)
                        except OSError:
                            # raise OSError("Can't create destination directory {}!".format(transfer_dir))
                            return 'NA'
                        print('Data transfer directory created:\n{}'.format(transfer_dir))
                        return transfer_dir
    # 'NA' when no directory contained a 'model' component.
    return transfer_dir
424f0c36e6768328532676d429d690c50fbc6bd8
14,223
import json


def jsonify(val):
    """Serialize *val* to a JSON string (naive json.dumps, no options)."""
    dumped = json.dumps(val)
    return dumped
0b52203062c031ad10fd660c96daa0459f81d367
14,224
def find_road_building_relations(voronoi, pts_line, pts_left, pts_right, road_crds):
    """
    Find all road IDs facing the current road segment.

    Makes use of the indices of the points between which a voronoi ridge
    lies. This can be used to check if a road and building are facing
    each other.
    """
    r_first = road_crds[0]
    r_last = road_crds[-1]
    ridge_points = voronoi.ridges
    b_ids_left = []
    b_ids_right = []
    # Store in a dictionary for each road segment ID, which building IDs
    # belong to the road.
    for pair in ridge_points:
        p_1 = voronoi.points[pair[0]]
        p_2 = voronoi.points[pair[1]]
        # Exclude end points of road for voronoi - limit wrong
        # classifications of buildings facing a road at the end of the
        # street.
        # NOTE(review): if voronoi.points holds numpy arrays, `==` here
        # is elementwise, not a scalar truth value — confirm these are
        # plain tuples/lists.
        if p_1 == r_first or p_1 == r_last or p_2 == r_first or p_2 == r_last:
            continue
        # Point one belongs to the road segment.
        if p_1 in pts_line:
            # Point two belongs to a building.
            fids_left = [fid for fid, bld_coords in pts_left.items() if p_2 in bld_coords]
            fids_right = [fid for fid, bld_coords in pts_right.items() if p_2 in bld_coords]
            if fids_left:
                b_ids_left.extend(fids_left)
            elif fids_right:
                b_ids_right.extend(fids_right)
        # Point two belongs to a road segment.
        elif p_2 in pts_line:
            # Point one belongs to a building.
            fids_left = [fid for fid, bld_coords in pts_left.items() if p_1 in bld_coords]
            fids_right = [fid for fid, bld_coords in pts_right.items() if p_1 in bld_coords]
            if fids_left:
                b_ids_left.extend(fids_left)
            elif fids_right:
                b_ids_right.extend(fids_right)
    # Count the number of cells that touch the road for each building,
    # which can then be used as a weight.
    left_weights = {n_left_bld: b_ids_left.count(n_left_bld) for n_left_bld in b_ids_left}
    right_weights = {n_right_bld: b_ids_right.count(n_right_bld) for n_right_bld in b_ids_right}
    # Remove the duplicate buildings corresponding to a road segment and
    # return the result.
    return list(set(b_ids_left)), list(set(b_ids_right)), left_weights, right_weights
4c023d6a93bb8a6fbdcd7565e4b191fe2b63a66a
14,225
async def subnet_present(
    hub,
    ctx,
    name,
    address_prefix,
    virtual_network,
    resource_group,
    security_group=None,
    route_table=None,
    connection_auth=None,
    **kwargs,
):
    """
    .. versionadded:: 1.0.0

    Ensure a subnet exists.

    :param name: Name of the subnet.

    :param address_prefix: A CIDR block used by the subnet within the virtual network.

    :param virtual_network: Name of the existing virtual network to contain the subnet.

    :param resource_group: The resource group assigned to the virtual network.

    :param security_group: The name of the existing network security group to assign to the subnet.

    :param route_table: The name of the existing route table to assign to the subnet.

    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurerm.network.virtual_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}

    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    action = "create"

    # Fall back to credentials from the acct context when no explicit
    # connection_auth dict was supplied.
    if not isinstance(connection_auth, dict):
        if ctx["acct"]:
            connection_auth = ctx["acct"]
        else:
            ret[
                "comment"
            ] = "Connection information must be specified via acct or connection_auth dictionary!"
            return ret

    snet = await hub.exec.azurerm.network.virtual_network.subnet_get(
        ctx,
        name,
        virtual_network,
        resource_group,
        azurerm_log_level="info",
        **connection_auth,
    )

    if "error" not in snet:
        # Subnet already exists: diff each managed property into
        # ret["changes"] so a no-op can short-circuit below.
        action = "update"
        if address_prefix != snet.get("address_prefix"):
            ret["changes"]["address_prefix"] = {
                "old": snet.get("address_prefix"),
                "new": address_prefix,
            }

        nsg_name = None
        if snet.get("network_security_group"):
            # Resource IDs end with the resource name.
            nsg_name = snet["network_security_group"]["id"].split("/")[-1]

        if security_group and (security_group != nsg_name):
            ret["changes"]["network_security_group"] = {
                "old": nsg_name,
                "new": security_group,
            }

        rttbl_name = None
        if snet.get("route_table"):
            rttbl_name = snet["route_table"]["id"].split("/")[-1]

        if route_table and (route_table != rttbl_name):
            ret["changes"]["route_table"] = {"old": rttbl_name, "new": route_table}

        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Subnet {0} is already present.".format(name)
            return ret

        # Test mode: report the pending update without applying it.
        if ctx["test"]:
            ret["result"] = None
            ret["comment"] = "Subnet {0} would be updated.".format(name)
            return ret

    else:
        # Subnet does not exist yet: everything is a new change.
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "address_prefix": address_prefix,
                "network_security_group": security_group,
                "route_table": route_table,
            },
        }

    if ctx["test"]:
        ret["comment"] = "Subnet {0} would be created.".format(name)
        ret["result"] = None
        return ret

    snet_kwargs = kwargs.copy()
    snet_kwargs.update(connection_auth)

    snet = await hub.exec.azurerm.network.virtual_network.subnet_create_or_update(
        ctx=ctx,
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **snet_kwargs,
    )

    if "error" not in snet:
        ret["result"] = True
        ret["comment"] = f"Subnet {name} has been {action}d."
        return ret

    ret["comment"] = "Failed to {0} subnet {1}! ({2})".format(
        action, name, snet.get("error")
    )
    # Failed runs report no changes.
    if not ret["result"]:
        ret["changes"] = {}
    return ret
66ab96df63a1a4e0d6ba0e5380360d06afe079d4
14,226
def _layout_to_matrix(layout):
    """Create the adjacency matrix for the tree specified by the given
    layout (level sequence)."""
    size = len(layout)
    adjacency = [[0] * size for _ in range(size)]
    parents = []  # stack of ancestor candidates, deepest on top
    for node in range(size):
        level = layout[node]
        if parents:
            # Pop entries at the same or deeper level; the survivor is
            # this node's parent.
            parent = parents[-1]
            while layout[parent] >= level:
                parents.pop()
                parent = parents[-1]
            adjacency[node][parent] = adjacency[parent][node] = 1
        parents.append(node)
    return adjacency
cd27cad3a7ab6f34ea2be13e5617a34dbbba834f
14,228
import click


def email_arg(required, email="email", metavar="[EMAIL]", nargs=1):
    """Standard email positional argument definition.

    Use as a decorator for commands.
    """
    return click.argument(
        email,
        metavar=metavar,
        nargs=nargs,
        type=str,
        required=required,
    )
26e51c44f1109c0f690f243ee9d40de959570b08
14,229
def simplify_dataset_name(dataset_name):
    """Normalize dataset names mangled earlier in the pipeline.

    BraTS and MURA names arrive in an inconsistent form; map them to a
    readable canonical name, passing anything else through unchanged.

    Args:
        dataset_name (string): name of dataset to simplify

    Returns:
        string: simplified dataset name
    """
    if "BraTS20" in dataset_name:
        simplified = "BraTS20"
    elif "study" in dataset_name:
        simplified = "MURA"
    else:
        simplified = dataset_name
    return simplified
af0e504df0af2f250c716fb4f797970f31cbb1e1
14,230
import click
from pathlib import Path
import os


def edit_inputs(client, workflow):
    """Edit workflow inputs.

    Interactively prompts for every workflow input path and every step
    argument value, storing input paths relative to the client root.
    Returns the mutated workflow.
    """
    for input_ in workflow.inputs:
        # Prompt with the current path as default; Enter keeps it.
        new_path = click.prompt(
            '{0._id}'.format(input_),
            default=input_.consumes.path,
        )
        # Normalize to a path relative to the client root.
        input_.consumes.path = str(
            Path(os.path.abspath(new_path)).relative_to(client.path)
        )
    # Re-prompt for each subprocess step argument as well.
    for step in workflow.subprocesses:
        for argument in step.arguments:
            argument.value = click.prompt(
                '{0._id}'.format(argument),
                default=argument.value,
            )
    return workflow
e9668f717b50786ddd3fe6375f56d17bad5ada05
14,231
import argparse


def get_parser():
    """Build the CLI parser for the EC2 instance configuration.

    The YAML configuration file is supplied via the positional 'file'
    argument.
    """
    parser = argparse.ArgumentParser(
        description='Creates EC2 instances from a basic YAML configuration file',
    )
    parser.add_argument(
        'file',
        help='The YAML file to use for making the EC2 instances',
    )
    return parser
2f57a88db79529e89d685a84dcdcf8101f38a833
14,232
import random


def seq_aleatoire(l_ascii):
    """Shuffle a list in place and return it.

    Generates a random ordering with the same composition as the list
    passed in.

    Parameters
    ----------
    l_ascii : list
        List of integers.

    Returns
    -------
    l_ascii : list
        The same list of integers, randomly reordered.
    """
    random.shuffle(l_ascii)
    return l_ascii
eb9c3e1d5182c4a359566e8efd1174934f415c4b
14,235
def predict_linear_regression_labaled_features(fitted_model, dict_features):
    """Inner product of model parameters with labeled feature values.

    Calculates y ~ const + sum( parameter*value ) from
    { 'feature name' : value }. Features may be a subset of the model's,
    in which case the prediction may be off. The 'const' parameter is
    added automatically when absent from the dictionary.
    """
    terms = [
        fitted_model.params[name] * value
        for name, value in dict_features.items()
    ]
    if 'const' not in dict_features:
        terms.append(fitted_model.params['const'])
    return sum(terms)
b86fa00ff80e528080e473f849721a16e6f9f2a8
14,237
def parse_time(text):
    """Parse a slurm "[days-]HH:MM[:SS]" duration into total minutes.

    NOTE(review): only the first two colon-separated fields are used, so
    a bare "MM:SS" value would be read as hours:minutes — confirm the
    upstream format always includes hours.
    """
    # check for days
    head, sep, tail = text.partition('-')
    days = int(head) if sep else 0
    clock = tail if sep else head
    # check for time
    fields = clock.split(':')
    return days * 24 * 60 + int(fields[0]) * 60 + int(fields[1])
91a07479f02a6d428a2c45fe6b701e009a9675cf
14,239
def get_video_name(image_name):
    """
    Extracts the video name from an image filename.

    The video name is the first three underscore-separated fields of the
    image name, re-joined with underscores.

    Args:
        image_name: The name of the image file.

    Returns:
        The name of the video that the image belongs to.
    """
    parts = image_name.split("_")
    return "%s_%s_%s" % (parts[0], parts[1], parts[2])
d2e5254dfaa650455b346cec43273cd50004b335
14,240
def estimate_reasonable_max(df, x):
    """Estimate a reasonable maximum value for the plot y axis.

    Applies Tukey's rule using the largest per-group IQR of the Time
    column: max(q3) + 1.5 * max(IQR).
    """
    grouped = df.groupby(x)
    lower = grouped.Time.quantile(0.25)
    upper = grouped.Time.quantile(0.75)
    widest_iqr = (upper - lower).max()
    return upper.max() + 1.5 * widest_iqr
a5fd4be194ca938c7cebd1ad31128ccffcfd44bd
14,241
def _add_members_to_obj(obj, data, columns):
    """Add members to obj.members

    Appends one "a=v"-joined string per member to the ``data`` tuple and
    a matching "member_N" name to the ``columns`` tuple; returns both.
    """
    i = 0
    for member in obj.members:
        # Column names are 1-based: member_1, member_2, ...
        name = 'member_' + str(i + 1)
        # NOTE(review): `member[i]` indexes each member by the running
        # loop counter — verify this shouldn't simply iterate `member`
        # itself (as written, member N must have at least N+1 entries).
        data += ('\n'.join((f'{a}={v}' for a, v in member[i])),)
        columns = columns + (name, )
        i += 1
    return data, columns
6fd03824f3a17bb92fd7a42e16d3951222451cc2
14,242
def get_points(file, img_shape, mirror=False):
    """Read bounding boxes from a YOLO-style label file.

    Each line is "cls cx cy w h" in normalized coordinates; each returned
    element is (p1, p2), the upper-left and lower-right corners of the
    box in pixels.

    :param file: path to the YOLO label file
    :param img_shape: image shape as (rows, cols, channels)
    :param mirror: swap corner orientation (depends on orientation of rows)
    :return: list of ((x1, y1), (x2, y2)) pixel tuples
    """
    points = []
    # (rows, cols, channels) -> (width, height)
    img_shape = img_shape[:-1][::-1]
    # Context manager closes the handle; the original leaked it.
    with open(file) as fh:
        rows = fh.read().split('\n')[:-1]
    for r in rows:
        r = r.split()
        center = (float(r[1]) * img_shape[0], float(r[2]) * img_shape[1])
        width = float(r[3]) * img_shape[0]
        height = float(r[4]) * img_shape[1]
        if mirror:
            # depends on orientation of rows
            p1 = round(center[0] + width / 2), round(center[1] - height / 2)
            p2 = round(center[0] - width / 2), round(center[1] + height / 2)
        else:
            p1 = round(center[0] - width / 2), round(center[1] - height / 2)
            p2 = round(center[0] + width / 2), round(center[1] + height / 2)
        points.append((p1, p2))
    return points
9648edc2aa065aceadf2128c28b5ff71ded2cb92
14,243
import json
import os


def build_feature_dict(args, examples):
    """Index features (one hot) from fields in examples and options."""
    feature_dict = {}

    def _insert(feature):
        # Assign the next index only on first sight of the feature.
        if feature not in feature_dict:
            feature_dict[feature] = len(feature_dict)

    # Exact match features
    if args.use_in_question:
        _insert('in_question')
        _insert('in_question_uncased')
        if args.use_lemma:
            _insert('in_question_lemma')

    # Part of speech tag features
    if args.use_pos:
        for ex in examples:
            for w in ex['pos']:
                _insert('pos=%s' % w)

    # Named entity tag features
    if args.use_ner:
        for ex in examples:
            for w in ex['ner']:
                _insert('ner=%s' % w)

    # Term frequency feature
    if args.use_tf:
        _insert('tf')
    return feature_dict
7be12c092f369587429e7ff0af9c05b2e6200c55
14,244
def _is_url(filename):
    """Check if the file is a url link.

    Args:
        filename (str): the file name or url link.

    Returns:
        bool: is url or not.
    """
    # str.startswith accepts a tuple of prefixes; one C-level call
    # replaces the manual loop over prefixes.
    return filename.startswith(('http://', 'https://'))
cfffb3db75de4f613097c081f950d3feff079f63
14,245
import types


def list_module_versions(glob, return_dict = False):
    """Print the versions of all loaded modules/packages in a script or
    notebook.

    Parameters
    ----------
    glob : dict
        output of the globals() function call.
    return_dict : bool, optional
        Whether to also return the versions dict. The default is False.

    Returns
    -------
    versions_dict : dict, optional
        Module names mapped to version strings (only when requested).
    """
    versions_dict = {}
    for value in glob.values():
        # Only modules that expose __version__ are reported.
        if isinstance(value, types.ModuleType) and "__version__" in value.__dict__:
            print(value.__name__, value.__version__)
            versions_dict[value.__name__] = value.__version__
    if return_dict is True:
        return versions_dict
17a51568e5bf80f8eb8dac23f1996dc5dd609c50
14,246
def get_events(trace_collection, keys=None, syscall=True):
    """Return a generator of events.

    An event is a dict with the key the argument's name.

    Args:
        trace_collection (babeltrace.TraceCollection): Trace from which to
            read the events.
        keys (dict, optional): dict of the multiple ways of the arguments
            to consider in addition to name and timestamp.
        syscall (bool, optional): only syscall should be considered

    Returns:
        generator: a generator of events.
    """
    # Each event dict merges the base name/timestamp pair with any
    # requested fields (renamed through the `keys` mapping).
    return (
        {
            **{
                'name': event.name,
                'timestamp': event.timestamp
            },
            **{
                keys[k]: event[k]
                # scope 3 = Stream event context (procname, pid, tid)
                for k in event.field_list_with_scope(3)
                if keys and k in keys
            },
            **{
                keys[k]: event[k]
                # scope 5 = Event fields (return value)
                for k in event.field_list_with_scope(5)
                if keys and k in keys
            }
        }
        for event in trace_collection.events
        if not syscall or "syscall" in event.name)
abc6e99c1e3cc64b45671ab6e054bb4bffcf2032
14,247
import torch


def softplus_inverse(x: torch.Tensor) -> torch.Tensor:
    """
    Inverse of the softplus function.

    Useful for initializing parameters that are constrained to be
    positive (via softplus). Non-tensor input is converted first.
    """
    tensor = x if isinstance(x, torch.Tensor) else torch.tensor(x)
    # log(exp(x) - 1) written stably as x + log(1 - exp(-x)).
    return tensor + torch.log(-torch.expm1(-tensor))
aa4649368b9e8372e7a22b4ab460bf8d26a38dad
14,248
def rosmac(y, t=0, r0=0.5, k=10, g0=0.4, h=2, l=0.15, e=0.6):
    """ Rosenzweig-MacArthur predator prey model """
    prey, cons = y

    # Logistic growth rate of the prey.
    growth = r0 * (1 - prey / k)
    # Holling type-II grazing rate.
    grazing = g0 / (prey + h)

    return [
        growth * prey - grazing * prey * cons,
        -l * cons + e * grazing * prey * cons,
    ]
ec17e71cf5742b3db2b48123a63c14ae62101bee
14,249
import re


def shorten(CSTAG: str) -> str:
    """Convert long format of cs tag into short format

    Args:
        CSTAG (str): cs tag in the **long** format
    Return:
        str: cs tag in the **short** format
    Example:
        >>> import cstag
        >>> cs = "cs:Z:=ACGT*ag=CGT"
        >>> cstag.shorten(cs)
        cs:Z::4*ag:3
    """
    body = CSTAG.replace("cs:Z:", "")
    # Split on operators, keeping them, then pair operator with payload.
    tokens = re.split(r"([-+*~=])", body)[1:]
    ops = [op + payload for op, payload in zip(tokens[0::2], tokens[1::2])]
    short_parts = []
    for op in ops:
        if op.startswith("="):
            # Identity runs become ":<length>" in the short form.
            short_parts.append(":" + str(len(op) - 1))
        else:
            short_parts.append(op)
    return "cs:Z:" + "".join(short_parts)
653de1947a3cdf06103b01c1a7efc3dbc16ea4ab
14,250
def dbex_eval(db, expr):
    """Evaluate a database expression via the db handle's ex_eval."""
    result = db.ex_eval(expr)
    return result
9739e2dd85fadd4d1569ddec20c6db57379a2de9
14,251
def _empty_invoke_bc(ivkbl, _stack, stack_ptr):
    """Write a warning to the screen"""
    signature = ivkbl.get_signature().get_embedded_string()
    print("Warning: undefined primitive #%s called" % signature)
    # Stack pointer is returned untouched: nothing was pushed or popped.
    return stack_ptr
89b33c7b888560da6e8d49e432db9bc068bfaa66
14,253
import math


def lognormal_stddev(m, stddev):
    """Standard deviation of log(x) given the mean and std. of x.

    Args:
        m: mean of x
        stddev: standard deviation of x

    Returns:
        std. of log x
    """
    variance_ratio = (stddev * stddev) / (m * m)
    return math.sqrt(math.log(variance_ratio + 1))
cb3abebc2225e3b2d33bdc1b6ff12c950437a594
14,254
import aiohttp


async def get_new_bangumi_json() -> dict:
    """
    Get json data from bilibili

    Args:

    Examples:
        data = await get_new_bangumi_json()

    Return:
        dict: data fetched from the bilibili bangumi timeline endpoint
    """
    url = "https://bangumi.bilibili.com/web_api/timeline_global"
    # Browser-like headers: the endpoint expects a plausible
    # origin/referer/user-agent combination.
    headers = {
        "accept": "application/json, text/plain, */*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9",
        "origin": "https://www.bilibili.com",
        "referer": "https://www.bilibili.com/",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(url=url, headers=headers) as resp:
            result = await resp.json()
            return result
02d39b7535a7db7b50dc81851725ffc67653cd17
14,255
def compare_math_formula(query, formula):
    """Compares two math tuples

    Parameters:
        query: the query math tuple, possibly containing "'*'" wildcards (str)
        formula: the formula math tuple (str)
    Returns:
        same: True if tuples are considered equal (boolean)
    """
    if "'*'" not in query:
        # No wildcards: plain string comparison.
        return query == formula
    # Wildcard query: every literal piece must appear in the formula.
    pieces = query.split("'*'")
    idx = 0
    while idx < len(pieces) and pieces[idx] in formula:
        idx += 1
    return idx == len(pieces)
736e9f564e4c834270fd25af7ed8252873d4c07d
14,256
def interpolate(df, key):
    """Interpolate subset of MultiIndex df."""
    # Select the level-0 slice of the column MultiIndex, keeping the level.
    subset = df.xs(key, level=0, drop_level=False, axis=1)
    # Interpolate along columns, then mask with the bfill'd notnull map:
    # bfill leaves NaN only past the last valid value, so interior gaps
    # stay interpolated while trailing positions revert to NaN.
    return (subset.interpolate(axis=1)
            .where(subset.fillna(method='bfill', axis=1).notnull()))
0f5fec0b891871747337194bf56a0814ee015fc0
14,257
import re


def output_name_from_topic(topic: str, prefix: str, topic_type: str) -> str:
    """
    Parses an MQTT topic and returns the name of the output that the
    message relates to.

    Raises ValueError when the topic does not match
    "<prefix>/<topic_type>/<name>/...".
    """
    pattern = f"^{prefix}/{topic_type}/(.+?)/.+$"
    matched = re.match(pattern, topic)
    if matched is None:
        raise ValueError("Topic %r does not adhere to expected structure" % topic)
    return matched.group(1)
ca6b3eb77815eee3f779bf16e8f706532dfe42e6
14,258
def invalid_entitlements_fixture(request):
    """A parameterized pytest fixture which returns invalid entitlements.

    Args:
        request : Built-in pytest fixture

    Returns:
        [String]: A string containing invalid Entitlements.
    """
    entitlements = request.param
    return entitlements
7f98b101bd8db303bb62a8c392689694f3a46805
14,259
def exponential(x, halflife):
    """
    Returns a decay factor based on the exponential function

    .. math:: f(x) = 2^(-x/halflife).

    :param x: The function argument.
    :param halflife: The half-life of the decay process.
    """
    decay = 2 ** (-x / halflife)
    return decay
2561d42f87c2818e82c2cc065592acf9ac8202fe
14,260
def rm_strheader(images):
    """
    Remove the data-URI header from a front-end base64 image string.

    :param images: base64 string of the image (may carry a
        "data:...;base64," header)
    :raises TypeError: if input is not a string
    :returns: base64 string without data header
    :rtype: string
    """
    # isinstance is the idiomatic type check (was: type(...) is not str);
    # the "imput" typo in the error message is also fixed.
    if not isinstance(images, str):
        raise TypeError('Error: input should be a string')
    # Everything after the first comma is the payload; find() returning
    # -1 (no header) leaves the string unchanged.
    index = images.find(',')
    return images[index + 1:]
7466621dfa0a5bb31eb8735da89d68b885cb04ea
14,262
def box_to_rect(box, width, height):
    """
    Convert a normalized center-format box to pixel corner coordinates.

    :param box: center_x, center_y, bbox_w, bbox_h (normalized)
    :param width: image width
    :param height: image height
    :return: x1, y1, x2, y2 (in pixel)
    """
    cx, cy, bw, bh = box
    half_w = bw * 0.5
    half_h = bh * 0.5
    return [
        int((cx - half_w) * width),
        int((cy - half_h) * height),
        int((cx + half_w) * width),
        int((cy + half_h) * height),
    ]
32413d14e242f2d790c5ae9c93418abb4152df46
14,263
def SqrNorm(x):
    """Square of the norm.

    :param x: array-like supporting elementwise multiplication
    :return: return sum(x*x)
    """
    squared = x * x
    return sum(squared)
03f35a7395ba74981eace38b634a14210932cffd
14,268
def new_superuser(db, new_superuser_factory):
    """Return a new superuser created via the factory fixture."""
    user = new_superuser_factory()
    return user
7b0751788c9aea1cd37b7e26deb5a7f6e1d6a35f
14,269
def _parse_numpy_pixeltype_from_string(s):
    """
    example: "image_FLOAT_3D.nii.gz" -> 'float32'
    """
    # Order matters: unsigned variants must be checked before their
    # signed substrings (e.g. UNSIGNEDINT before INT).
    mapping = (
        ('UNSIGNEDCHAR', 'uint8'),
        ('CHAR', 'int8'),
        ('UNSIGNEDSHORT', 'uint16'),
        ('SHORT', 'int16'),
        ('UNSIGNEDINT', 'uint32'),
        ('INT', 'int32'),
        ('FLOAT', 'float32'),
        ('DOUBLE', 'float64'),
    )
    for token, dtype in mapping:
        if token in s:
            return dtype
    raise ValueError('cant parse pixeltype from string %s' % s)
8066c9eab8b31fa290ac68d44b6bd448a3a689de
14,270
def set_zenangle(profs, zen=None):
    """Set zenith angles to a new value.

    If zen is missing, angles cycle in steps of 10 degrees between 0 and
    70 over the profile set.
    """
    # 'is not None' (was a truthiness test) so an explicit 0-degree
    # zenith is honored instead of silently falling back to cycling.
    if zen is not None:
        zenlist = [zen]
    else:
        zenlist = range(0, 71, 10)
    for p in range(profs['nprof']):
        # Cycle through zenlist across the profile set.
        profs[p]['zenangle'] = zenlist[p % len(zenlist)]
    return profs
41d7099e7eb7ac9da1532c28be00b1a574e84e6e
14,271
def preprocess(X):
    """
    Optional preprocessing for secondary data format.

    Flattens each glog: every function call becomes a space-joined
    string, and calls are newline-joined per glog (in place).

    @param X: list of lists of lists (glogs, function calls, function
        call + attributes)
    """
    for outer, glog in enumerate(X):
        call_strings = [' '.join(str(tok) for tok in call) for call in glog]
        X[outer] = '\n'.join(call_strings)
    return X
b759c0353dfeb5c2d4292fd541c8dba507c1ccea
14,272
import requests
from bs4 import BeautifulSoup


def question_info_get(qid):
    """
    Fetch the keyword tags of a Zhihu question.

    Parameters
    ----------
    qid : int
        question id.

    Returns
    -------
    keywords : list
        Question keywords; empty list on any failure.
    """
    # NOTE(review): blank header values and the bare excepts below look
    # like scraping workarounds — consider narrowing the exception types.
    headers = {
        "User-Agent": "",
        "Connection": "",
        "Accept": "",
        "Accept-Language": ""}
    q_url = f'https://www.zhihu.com/question/{qid}'
    # print(q_url)
    try:
        html = requests.get(q_url, headers=headers, timeout=20)
        html.encoding = 'utf-8'
        soup = BeautifulSoup(html.text, 'lxml')
        # Keywords live in the <meta name="keywords"> tag as a
        # comma-separated attribute value.
        keywords = str(soup.find('meta', {'name': 'keywords'}))
        keywords = keywords.split('"')[1].split(',')
        return keywords
    except:
        return []
fb5f1d6bf41a267237f24d677e43c51c474a90f7
14,273
def dup(integer):  # real signature unknown; restored from __doc__
    """
    dup(integer) -> integer

    Duplicate an integer socket file descriptor.  This is like os.dup(), but for
    sockets; on some platforms os.dup() won't work for socket file descriptors.
    """
    # Stub restored from a C-extension docstring; always returns 0.
    return 0
aac98f8c767d2224b3949aea4aafcb83d0c0ee1c
14,276
import torch


def communities_to_labels(n_nodes, communities):
    """
    Build a per-node community label tensor.

    For n_nodes and a communities dict (or list of lists), returns a
    tensor of size n_nodes where each node index holds its community's
    1-based label.

    :param n_nodes: total number of nodes
    :param communities: dict or list of node-id collections
    :return: torch tensor of community labels
    """
    labels = torch.zeros([n_nodes])
    if isinstance(communities, dict):
        keys = list(communities.keys())
    else:
        keys = range(len(communities))
    # Labels start at 1; 0 marks nodes in no community.
    for label, key in enumerate(keys, start=1):
        for node_id in communities[key]:
            labels[node_id] = label
    return labels
e961568fff8959e909f0b44469a745d33ff5233d
14,277
def check_intensifiers(text, INTENSIFIER_MAP):
    """
    Check for intensifiers of an emotion in a text chunk.

    :param text: text chunk with the emotion term
    :param INTENSIFIER_MAP: iterable of "word : class : booster" entries
    :return: (has_intensity, booster) — booster stays 'NULL' when no
        intensifier is found
    """
    # Leftover debug prints and the commented-out BOOSTER_MAP constant
    # were removed; the matching logic is unchanged. Note the loop keeps
    # going after a match, so the last matching entry's booster wins.
    has_intensity = False
    booster = 'NULL'
    for int_term in INTENSIFIER_MAP:
        intensifier = int_term.split(':')[0].strip()
        if intensifier in text:
            has_intensity = True
            booster = float(int_term.split(':')[2].strip())
    return has_intensity, booster
2c35b6b66395bc7105b9fb1b9cf7f04b5686cb8d
14,278
def get_next_code(last_code):
    """Generate next code based on the last_code."""
    # Linear congruential step with the fixed multiplier and modulus.
    multiplier = 252533
    modulus = 33554393
    return last_code * multiplier % modulus
9fcf416df3448a8ca46a55073f66d12cd8fc2593
14,279
import shutil
from pathlib import Path
import os


def is_ffmpeg_installed(ffmpeg: str = "ffmpeg") -> bool:
    """
    Check if ffmpeg is installed.
    """
    if ffmpeg == "ffmpeg":
        # Bare name: resolve through the system PATH.
        return shutil.which("ffmpeg") is not None
    # Explicit path: it must exist and carry execute permission.
    candidate = str(Path(ffmpeg).absolute())
    return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
16fc862ab68313e6df9d41156856bf68b26b9192
14,280
import base64


def decode(b64):
    """
    Decode the given Base64-encoded attribute into a regular Python
    string.

    Note that an error may be raised when the input data is not encoded
    properly.
    """
    raw = base64.b64decode(b64)
    return raw.decode('ascii')
5c04c43247d1415ca8f1398c3b8206c50a7e0fa4
14,282
from typing import Counter


def session_referer_class(session):
    """Most common referer class in session (categorical)."""
    counts = Counter(record['referer_class'] for record in session)
    return counts.most_common(1)[0][0]
0b4047ea63063b7535bbaea7c67756de15561522
14,285
def _compute_max_name_width(contracts: dict) -> int:
    """Return the maximum width needed by the function name column.

    Returns 0 for an empty contracts mapping (bare max() would raise
    ValueError on an empty sequence).
    """
    return max(
        (
            len(function)
            for contract, functions in contracts.items()
            for function in functions
        ),
        default=0,
    )
bc57c408a49182cdfb26a1955a4a3943c060af86
14,288
import logging


def get_logger(name):
    """Get the logger with a given name

    Args:
        name: name of the logger to create
    """
    logger = logging.getLogger(name)
    return logger
0f02aba7f01c2aafbb7fafda0bb260d73fd240a1
14,290
def search_dicts(k, v, data):
    """
    Search a list of dicts by key.

    Returns the single matching dict when exactly one row matches,
    otherwise a (possibly empty) list of matching rows.
    """
    matches = [row for row in data if k in row and row[k] == v]
    # Single hit: unwrap it instead of returning a one-element list.
    if len(matches) == 1:
        return matches[0]
    return matches
c133ec1452f73e51c19af59b6fb49039dda28176
14,292
def get_type_tuple(df):
    """
    Parameters
    ----------
    df: data frame

    Returns
    -------
    Return a list [(column name, datatype)]
    """
    return [(col, df[col].dtype) for col in df.columns]
a5c0568defbbd4a4fdc88bf1e1bce058bf8d8495
14,293
def decode_enums(obj, available_enums=None):
    """Recursively decode enum members from a parsed-YAML structure.

    A three-element list of the form ``['enum', <ClassName>, <MemberName>]``
    is replaced with the matching member from *available_enums*; dicts are
    decoded value-by-value; everything else is returned untouched.

    :param obj: object to check
    :param available_enums: list of available enum classes to decode
    :return: decoded object
    """
    if isinstance(obj, dict):
        return {
            key: decode_enums(value, available_enums=available_enums)
            for key, value in obj.items()
        }
    if isinstance(obj, list) and len(obj) == 3 and obj[0] == 'enum':
        for enum_cls in (available_enums or []):
            if enum_cls.__name__ != obj[1]:
                continue
            for member in enum_cls:
                if member.name == obj[2]:
                    return member
    return obj
e8bf3cc55363fbf13ad400bbbfd3006ce23cd3d5
14,294
import itertools


def euler4(l=3):
    """Largest palindromic product of two distinct l-digit numbers.

    Solution for Project Euler problem 4; a simple optimisation would
    be an early break.
    """
    lo, hi = 10 ** (l - 1), 10 ** l

    def is_palindrome(n):
        digits = str(n)
        return digits == digits[::-1]

    products = (a * b for a, b in itertools.combinations(range(lo, hi), 2))
    return max(p for p in products if is_palindrome(p))
6e0d4fbeb14b61a30b78caccf4de168fdff12efb
14,295
def _moffat_(r, alpha, beta, normed=False): """ """ norm = 2*(beta-1)/alpha**2 if normed else 1 return norm * (1 + (r/alpha)**2 )**(-beta)
6dcea83247d97c1cb52f7051fe1444c21af604c7
14,297
def label_mapping(y_data, known_labels):
    """
    Maps all class index to used one. It is possible to subsample number of classes.

    :param y_data: Array with labels to map
    :type y_data: nd.array
    :param known_labels: Labels index to map
    :type known_labels: list-like
    :return: label_map and y_data with cls_idx remapped
    :rtype: tuple(dict, nd.array)
    """
    label_map = {}
    # Compute all masks up front so that remapping label l -> i cannot
    # cascade when a later label equals an earlier target index
    # (e.g. known_labels=[1, 0] used to map everything to 1).
    masks = [(i, l, y_data == l) for i, l in enumerate(known_labels)]
    for i, l, mask in masks:
        label_map[i] = l
        print(f"\t {l} => {i}")
        y_data[mask] = i
    return label_map, y_data
d2db167aeb51b02167d530100652625eb44e8bbd
14,298
def create_periodic_dosing(timeHigh, timeLow, highVal, lowVal=0):
    """Create a Periodic dosing profile

    The returned profile oscillates between ``highVal`` (held for
    ``timeHigh``) and ``lowVal`` (held for ``timeLow``) each period.

    :param timeHigh: Time dosing remains at highVal (float)
    :param timeLow: Time dosing remains at lowVal (float)
    :param highVal: dosing amount during high levels (float)
    :param lowVal: dosing level during low levels (float)
    :returns: Dosing Profile
    :rtype: Callable[[float], float]
    """
    period = timeHigh + timeLow

    def profile(t: float) -> float:
        # Position within the current period; the high phase includes
        # its right endpoint (phase == timeHigh).
        if t % period <= timeHigh:
            return highVal
        return lowVal

    return profile
358723ba0187ffddb91d9aa1d2c70a1f14a774b2
14,299
import itertools def _generate_centers_and_final_shape(img_shape, stride, first_center): """ generates final shape and centers for applying a sliding window """ dim_ranges = [] for dim_min, dim_max, dim_stride in zip(first_center, img_shape, stride): dim_ranges.append(range(dim_min, dim_max, dim_stride)) final_shape = tuple([len(dim_range) for dim_range in dim_ranges]) centers = itertools.product(*dim_ranges) return final_shape, centers
be92aee8f844e20aa41761e6b89a9c831cd2e934
14,300
def _map_account_name_to_login_names(accounts): """ Maps the accounts name to the login names. this method simply groups the csv rows by the account name :param accounts: list of accounts records :return: dict """ accounts_dict = {} for account in accounts: columns = account.decode("utf-8").split(",") if len(columns) >= 3: account, login_name, risk_score = columns[0], columns[1], columns[2] if account not in accounts_dict: accounts_dict[account] = {"login_names": set(), "score": 0} accounts_dict[account]["login_names"].add(login_name) # Only insert the highest risk score for the login names belong to the account. if accounts_dict[account]["score"] < int(float(risk_score)): accounts_dict[account]["score"] = int(float(risk_score)) return accounts_dict
9553659e7a8cf4ef192e6019b603f954e242abbe
14,301
def match(first_list, second_list, attribute_name):
    """Return True when the two lists share an attribute value.

    Compares ``attribute_name`` across every pairing of elements from
    ``first_list`` and ``second_list``; True as soon as any pair agrees.
    """
    for left in first_list:
        # The generator keeps left[attribute_name] lazily evaluated, so an
        # empty second_list never touches the key (as in the original).
        if any(left[attribute_name] == right[attribute_name]
               for right in second_list):
            return True
    return False
2b7c38ef3132c5cb9e693be2995691600ac76ec7
14,302
def get_spacing_dimensions(widget_list):
    """Classify widgets by the longest 'dimensions' value among them.

    will be further developed

    Returns {'size_unit': u} where u is 'null' (nothing set), 's' (length
    1-2), 'm' (length 3-6) or 'l' (longer).
    """
    longest = 0
    for widget in widget_list:
        dims = widget.get('dimensions', '')
        if dims:
            longest = max(longest, len(dims))
    if longest == 0:
        unit = 'null'
    elif longest <= 2:
        unit = 's'
    elif longest <= 6:
        unit = 'm'
    else:
        unit = 'l'
    return {'size_unit': unit}
8c521b1315c120173d22ff25bf3c2b41bdab8ecd
14,304
def _check_core_match(shape1, shape2): """Checks shape1 and shape2 are valid shapes to perform matmul""" ndim1, ndim2 = len(shape1), len(shape2) if ndim1 < 1 or ndim2 < 2: return True if shape1[-1] == shape2[-2]: return True raise ValueError(f'mismatch in core dimension of input operands (size {shape1[-1]} ' + f'is different from {shape2[-2]})')
4acb2430518b2a5c682d7894991095a98d766cc7
14,305
import os


def setupCheckpoint(checkpoint_dir):
    """Prepare checkpoint writing and locate the newest checkpoint.

    Creates ``checkpoint_dir`` when it is missing.

    Parameter:
    - checkpoint_dir -- Directory for writing down checkpoints

    Returns:
    - Path of the most recently created checkpoint file, or None when
      the directory is empty or was just created.
    """
    if os.path.exists(checkpoint_dir):
        entries = [checkpoint_dir + "/" + name
                   for name in os.listdir(checkpoint_dir)]
        return max(entries, key=os.path.getctime) if entries else None
    os.makedirs(checkpoint_dir)
    return None
5a1518fb08688cba0f37ab7f18cab4a2c1cca4bd
14,306
def LinearGainLine(x1,y1,x2,y2):
    """
    returns tuple (m,b) that satisfies the equation y=mx+b
    uses two points in histogram space to determine slope
    x : 0..nbins
    y : 0..255
    """
    # Slope between the two points; -(y1-y2)/(x2-x1) == (y2-y1)/(x2-x1).
    # A degenerate segment (x2 <= x1) falls back to slope 0.
    if (x2 > x1):
        m = -(y1-y2) / (x2-x1)
    else:
        m = 0
    # NOTE(review): b is set to x1, so the returned pair does NOT satisfy
    # y = m*x + b for the given points (that would need b = y1 - m*x1).
    # Callers may rely on this value; confirm intent before changing.
    b = x1
    return m,b
71d1d321c85636e15f05e3ab26c66a48a1cc5103
14,307
def mean(num_lst):
    """
    Calculates the mean of a list of numbers

    Parameters
    ----------
    num_lst : list
        List of numbers to calculate the average of

    Returns
    -------
    The average/mean of num_lst

    Raises
    ------
    ZeroDivisionError
        If num_lst is empty (same as the original behaviour).
    """
    # len() replaces the hand-rolled counting loop of the original.
    return sum(num_lst) / len(num_lst)
4339a8f755ebb26328bbbc76b2b76034d21ea004
14,309
def find_cycle(start, graph):
    """Finds a path from `start` to itself in a directed graph.

    Note that if the graph has other cycles (that don't have `start` as a hop),
    they are ignored.

    Args:
      start: str name of the node to start.
      graph: {str => iterable of str} is adjacency map that defines the graph.

    Returns:
      A list or str with nodes that form a cycle or None if there's no cycle.
      When not None, the first and the last elements are always `start`.

    Raises:
      KeyError if there's a reference to a node that is not in the `graph` map.
    """
    explored = set()  # set of roots of totally explored subgraphs
    visiting = []  # stack of nodes currently being traversed

    # Recursive DFS; True iff a "start -> ... -> start" cycle is reachable
    # from `node`.
    def visit(node):
        if node in explored:
            return False  # been there, no cycles there that have `start` in them
        if node in visiting:
            # Found a cycle that starts and ends with `node`. Return True if it is
            # "start -> ... -> start" cycle or False if is some "inner" cycle. We are
            # not interested in the latter.
            return node == start
        visiting.append(node)
        for edge in graph[node]:
            if visit(edge):
                return True  # found a cycle!
        popped = visiting.pop()
        assert popped == node
        explored.add(node)  # don't visit this subgraph ever again
        return False

    if not visit(start):
        return None
    visiting.append(start)  # close the loop
    return visiting
0ce38b4813f6a0e55898ff3c17b54d35935ce85f
14,310
import numpy


def square_aspect(xlim, ylim):
    """Pad the shorter axis range so both spans are equal.

    Calculates the limits to produce a square plot if all axes are
    equal scale; the shorter range is widened symmetrically.
    """
    x0, x1 = xlim
    y0, y1 = ylim
    span_x = numpy.abs(x1 - x0)
    span_y = numpy.abs(y1 - y0)
    if span_x > span_y:
        pad = (span_x - span_y) / 2
        return xlim, [y0 - pad, y1 + pad]
    pad = (span_y - span_x) / 2
    return [x0 - pad, x1 + pad], ylim
65e4c6518004a86b258d9d7084215c116ea7b733
14,311
def find_events(events, start, end, at_words=None):
    """Return (date, text, event) tuples for the events in our date range."""
    collected = set()
    for event in events:
        collected |= set(event.get_dates(start, end, at_words))
    return collected
52a7250ad6ee172d52d3c563318eac3c4fd436cd
14,312
def has_connected_children(bone):
    """ Returns true/false whether a bone has connected children or not.
    """
    # any() short-circuits on the first connected child instead of
    # scanning the whole child list as the original loop did.
    return any(child.use_connect for child in bone.children)
b88106b3ceac26987253e2995cda851f7a622d2f
14,313
def create_user(first_name, last_name, email, phone_number, dob, address_id):
    """
    Creates immutable user data structure

    :param first_name: user's first name (string)
    :param last_name: user's last name (string)
    :param email: user's email (string)
    :param phone_number: user's phone number (string)
    :param dob: user's date of birth (string)
    :param address_id: id of the user's address record
    :return: (tuple) fields in declaration order
    """
    user = (first_name, last_name, email, phone_number, dob, address_id)
    return user
2c6f647ac7f85fd56f4870bade01bb7a951ff4d2
14,314
import six


def ask_for_export_email():
    """Ask for a registered email address.

    Prints usage guidance, then prompts interactively; EOF on stdin
    yields an empty string.
    """
    intro = (
        'You have not set the email variable at the top of this script.',
        'Please set this variable in the script, or enter it below. Note',
        'that you need to register your email at JSOC first. You can do',
        'this at: http://jsoc.stanford.edu/ajax/register_email.html',
    )
    for line in intro:
        print(line)
    try:
        email = six.moves.input('\nPlease enter a REGISTERED email address: ')
    except EOFError:
        email = ''
    print()
    return email
7ac8497021e71718f70c54f4c2b045fa455d65e5
14,316
def dlookup_in(d, l):
    """Return the key of the first dict entry whose value contains ``l``.

    Values that do not support the ``in`` operator are skipped; None is
    returned when no value contains ``l``.
    """
    # iteritems() was Python 2 only; items() works on both 2 and 3.
    for k, v in d.items():
        try:
            if l in v:
                return k
        except TypeError:
            continue
    return None
fa53a4e30607c783096b37eef3b3ca936de07097
14,317
def remove_duplicates_for_fetch(items: list, last_fetched_ids: list) -> list:
    """Remove items that were already sent in last fetch.

    Args:
        items (list): Items retrieved in this fetch.
        last_fetched_ids (list): ID's of items from last fetch.

    Returns:
        (list) New items without items from last fetch.
    """
    # Items with a falsy/missing 'id' are dropped, matching the original.
    return [
        item for item in items
        if item.get('id') and item.get('id') not in last_fetched_ids
    ]
705e786563206015877798e227c89a978831f97f
14,319
def append_unique(the_list, new_item):
    """
    Append ``new_item`` to ``the_list`` only if an equal item is not
    already present.

    :param the_list: list to (possibly) extend in place
    :param new_item: candidate item
    :return: the same list object
    """
    already_present = any(new_item == existing for existing in the_list)
    if not already_present:
        the_list.append(new_item)
    return the_list
2974643e1cbbc7c0cf8dccb7ddc1fd21368eb3f8
14,320
from typing import Tuple
from typing import List


def flatten(array: Tuple[List[Tuple[int, int, int, int, int]]]):
    """Flatten ``array`` a single level deep.

    :param array: iterable of iterables (move tuples)
    :return: one list with every inner item, in order
    """
    return [item for sublist in array for item in sublist]
835f2d166a07c41d1803e2c1c4a7244990326302
14,323
def get_playoff_bracket_string(league):
    """
    Creates and returns a message of the league's playoff bracket.

    :param league: Object league
    :return: string message league's playoff bracket
    """
    return league.get_playoff_winners_bracket()
0ac2e02493a99c830704d6651125e0c30c8f4c7c
14,325
def oxygen_abundance(Z):
    """
    Set the oxygen abundance.

    We assume Asplund et al 2009 abundance at Zsun and that Ao scales
    linearly with Z.  Z in solar units.
    """
    SOLAR_AO = 4.90e-4  # oxygen abundance at Z = Zsun
    return SOLAR_AO * Z
1fe4ad34afbbe4c43fd883df434d9fcc7a83b9b7
14,327
def subdivide_dict(dictionary, num_subdivisions):
    """ Distributes the k: v pairs in dict to N dicts, round-robin by key order. """
    buckets = [{} for _ in range(num_subdivisions)]
    for index, key in enumerate(dictionary):
        buckets[index % num_subdivisions][key] = dictionary[key]
    return buckets
6aa9abeff609115e1ecd1085d4ba0d875c59bf2d
14,328
from typing import Dict
from typing import Any
import pickle


def serialize_dlv2_checkpoint(dlv2_checkpoint: Dict[str, Any]) -> bytes:
    """
    Serialize a DataLoader V2 checkpoint dict to bytes via pickle.
    """
    return pickle.dumps(dlv2_checkpoint)
ae5c42d0daea42e61ff77c8b52aba30586657e7c
14,331
import os import imp def _import_from(mod, path, mod_dir=None): """ Imports a module from a specific path :param mod: A unicode string of the module name :param path: A unicode string to the directory containing the module :param mod_dir: If the sub directory of "path" is different than the "mod" name, pass the sub directory as a unicode string :return: None if not loaded, otherwise the module """ if mod_dir is None: mod_dir = mod if not os.path.exists(path): return None if not os.path.exists(os.path.join(path, mod_dir)): return None try: mod_info = imp.find_module(mod_dir, [path]) return imp.load_module(mod, *mod_info) except ImportError: return None
87318df47d4d672aeb68663cfecfc1bdebed09a6
14,332
def to_geojson(shapes, buildings):
    """Converts the shapes into geojson.

    This function will combine the burn scar region and buildings into
    geojson: burn scar polygons in red, building polygons in blue.
    """
    # Burn-scar features: a list is assumed to hold objects exposing
    # __geo_interface__; any other iterable already yields raw geometries.
    if type(shapes) == list:
        scar_features = [
            {'type': 'Feature',
             'properties': {'raster_val': value, 'color': 'red'},
             'geometry': geom.__geo_interface__}
            for geom, value in shapes
        ]
    else:
        scar_features = [
            {'type': 'Feature',
             'properties': {'raster_val': value, 'color': 'red'},
             'geometry': geom}
            for geom, value in shapes
        ]
    # Building footprints, tagged blue.
    building_features = [
        {'type': 'Feature',
         'properties': {'BuildingID': feat['properties']['BuildingID'],
                        'color': 'blue'},
         'geometry': feat['geometry']}
        for feat in buildings['features']
    ]
    return {
        'type': 'FeatureCollection',
        'features': scar_features + building_features,
    }
fb0d417ab3c049d4b89e14dea1f15a3f40f42803
14,334
import numpy


def label_ids(array):
    """
    Returns (sorted ndarray of) unique positive elements of array.

    Made obsolete by segment.extractIds?
    """
    unique_vals = numpy.unique(array)
    # keep only the strictly positive ids
    return unique_vals.compress(unique_vals > 0)
10158dbfea6e5512ee3d81f6c51cfb7a3828bdf1
14,335
def vyhodnot(pole):
    """Evaluate a 1-D tic-tac-toe row (20 cells, players alternate 'o'/'x').

    Returns 'x' or 'o' for a three-in-a-row win, '!' for an invalid or
    drawn board (both wins present, or no free '-' cell), and '-' while
    the game is still open.
    """
    x_wins = 'xxx' in pole
    o_wins = 'ooo' in pole
    if x_wins and o_wins:
        return '!'
    if x_wins:
        return 'x'
    if o_wins:
        return 'o'
    if '-' not in pole:
        return '!'
    return '-'
c647d7f5891ba50d495a0aae5ead57b83594969d
14,336
import re


def get_product(title):
    """Extract protein product from sequence title.

    Parameters
    ----------
    title : str
        original title which reads "product [organism]"

    Returns
    -------
    str
        product

    Notes
    -----
    NCBI protein product xyz [organism]
    """
    trimmed = re.sub(r'^\s+|\s+$', '', title)
    # drop the trailing "[organism]" annotation
    return re.sub(r'\s+\[.+\]$', '', trimmed)
7754e3b90f2119716e64c6a3f60e82c5a6cf8033
14,337
def phone_brands(df):
    """
    Count devices per brand, folding rare brands (<= 300 devices) into 'others'.

    >>> import pandas as pd
    >>> df = pd.read_csv(r'../data.csv')
    >>> phone_brands(df)
    {'others': 1912, 'Huawei': 1977, 'LG': 622, 'Apple': 2910, 'Xiaomi': 1491, 'Samsung': 4738, 'Motorola': 584, 'OPPO': 542, 'Vivo': 575}
    """
    # One value_counts() pass replaces the per-brand boolean-mask count,
    # which the original computed twice per brand over the whole frame.
    counts = df.device_brand_name.value_counts()
    brands = {'others': 0}
    # Iterate unique() to keep the original insertion order
    # (first-appearance order), which the doctest output depends on.
    for brand in df.device_brand_name.unique():
        n = counts[brand]
        if n > 300:
            brands[brand] = n
        else:
            brands['others'] = brands['others'] + n
    return brands
19bc556d0882d5efa73f88f4f58f8090f6ed3fe1
14,338
def choose_display():
    """ prompt user to choose scale bar display """
    valid = ("d", "theta", "both")
    while True:
        answer = input("Please choose the scale to display.\nd, theta, both\n")
        if answer in valid:
            return answer
        print("incorrect choice\n")
8f03dbfbbf250881ecb6a27e4de332bdac4dfcd2
14,340
def getAllCombinations(cond_):
    """Returns all possible combinations of features for training on.

    ``cond_ == 1`` selects the loss feature set, ``cond_ == 2`` the latency
    set, anything else the full feature list.  Every non-empty subset of
    the chosen features is returned as a list of sets.
    """
    full = [
        "total_bytes", "max_bytes", "1->2Bytes", "2->1Bytes", "1->2Pkts",
        "2->1Pkts", "total_pkts", "number_ms", "pkt_ratio", "time_spread",
        "pkt sum", "longest_seq", "total_pkt_sizes", "mean_tdelta",
        "max_tdelta",
    ]  # 'proto',
    latency = ["byte_ratio", "pkt_ratio", "time_spread", "total_bytes", "2->1Pkts"]
    loss = [
        "total_pkts", "total_pkt_sizes", "2->1Bytes", "number_ms",
        "mean_tdelta", "max_tdelta",
    ]
    if cond_ == 1:
        selected = loss
    elif cond_ == 2:
        selected = latency
    else:
        selected = full
    combinations = []
    # Incrementally build the power set (minus the empty set): union each
    # new feature into every subset seen so far, then add its singleton.
    for feature in set(selected):
        combinations.extend(existing | {feature} for existing in list(combinations))
        combinations.append({feature})
    print("all combinations generated")
    return combinations
9d4324170dcb21779290398ad744dec32fff30be
14,341
def reverse_bits(n, width=8):
    """Reverse bit order (not the fastest way)

    ``n`` is rendered as a ``width``-bit binary string, reversed, and
    parsed back to an int.
    """
    forward = format(n, '0{}b'.format(width))
    return int(forward[::-1], 2)
d001babf6d3219ff9de7994e2719ebafe9561208
14,342