Columns: content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k)
def puncstart(str1, punctuation):
    """returns all the punctuation from the start of the string"""
    newstring = u""
    for c in str1:
        if c in punctuation or c.isspace():
            newstring += c
        else:
            break
    return newstring
57d686d2a38e9d28201f9d88a70d7a03610bbded
697,307
import os

def available_ifos():
    """List available included pre-defined IFOs"""
    ifos = []
    root = os.path.dirname(__file__)
    for f in os.listdir(root):
        if os.path.isdir(os.path.join(root, f)) and f[0] != "_":
            ifos.append(f)
    return sorted(ifos)
9a6cf2a27928468139f1f9b8cd1c353b1d64e37f
697,308
def _date(api):
    """Returns UTC YYYY.MM.DD to use in tags."""
    return api.time.utcnow().strftime('%Y.%m.%d')
91e5c4c0bafa00b33e2b9c22d28b255a50f0d091
697,309
import json

def clean_data() -> list[dict[str, str]]:
    """Read data from file and generate some new composite fields.

    Returns:
        list[dict]: Records with the new composite/cleaned fields.
    """
    data = []
    with open("data/no_duplicates.jsonl", "r") as file:
        for line in file:
            info: dict[str, str] = json.loads(line)
            address = info.get("EventAddr", "").strip()
            city = info.get("EventCity", "").strip()
            state = info.get("EventState", "").strip()
            zip_code = info.get("EventZip", "").strip()
            combined_address = f"{address}, {city}, {state} {zip_code}"
            info["combined_address"] = combined_address
            causea = info.get("CauseA", "").strip()
            causeb = info.get("CauseB", "").strip()
            cause_other = info.get("CauseOther", "").strip()
            combined_causes = f"{causea}, {causeb}, {cause_other}"
            info["combined_causes"] = combined_causes
            data.append(info)
    return data
8380904159cd7f471bf1edaad23a3ff715bf4da9
697,310
def normal_func_parameter(plane):
    """
    Calculate coefficients for plane function
    pt1 = [x1, y1, z1]
    pt2 = [x2, y2, z2]
    pt3 = [x3, y3, z3]
    vector1 = [x2 - x1, y2 - y1, z2 - z1]
    vector2 = [x3 - x1, y3 - y1, z3 - z1]
    """
    pt1 = plane[0]
    pt2 = plane[1]
    pt3 = plane[2]
    vector1 = [pt2[0] - pt1[0], pt2[1] - pt1[1], pt2[2] - pt1[2]]
    vector2 = [pt3[0] - pt1[0], pt3[1] - pt1[1], pt3[2] - pt1[2]]
    cross_product = [vector1[1] * vector2[2] - vector1[2] * vector2[1],
                     -1 * (vector1[0] * vector2[2] - vector1[2] * vector2[0]),
                     vector1[0] * vector2[1] - vector1[1] * vector2[0]]
    a = cross_product[0]
    b = cross_product[1]
    c = cross_product[2]
    d = -(cross_product[0] * pt1[0] + cross_product[1] * pt1[1] + cross_product[2] * pt1[2])
    return a, b, c, d
701e0416d501a40093b8dfd36c02cc0b45072aed
697,311
def lreplace(a, b, string):
    """Replaces the head of the string."""
    if string.startswith(a):
        return b + string[len(a):]
    return string
73ac9d588c98350699bdcd0e2ea70035cd17c77c
697,312
import torch

def one_hot_encode(y: torch.Tensor, num_classes: int) -> torch.Tensor:
    """
    Creates the one hot encoding of the provided class labels.

    :param y: class labels of size (batch_size,)
    :param num_classes: number of distinct classes
    :return y_one_hot: one hot encoding of size (batch_size, num_classes)
    """
    return torch.zeros(y.size(0), num_classes).scatter_(1, y.view(-1, 1), 1.)
ba1cc5d556e17fa7d797efa32e697a9bdac4a3c0
697,313
def color_distance(c1, c2):
    """
    Metric to define the visual distinction between two (r,g,b) colours.
    Inspired by: https://www.compuphase.com/cmetric.htm

    :param c1: (r,g,b) colour tuple. r, g and b are values between 0 and 1.
    :param c2: (r,g,b) colour tuple. r, g and b are values between 0 and 1.
    :return: distance: float representing visual distinction between c1 and c2.
        Larger values = more distinct.
    """
    r1, g1, b1 = c1
    r2, g2, b2 = c2
    mean_r = (r1 + r2) / 2
    delta_r = (r1 - r2) ** 2
    delta_g = (g1 - g2) ** 2
    delta_b = (b1 - b2) ** 2
    distance = (2 + mean_r) * delta_r + 4 * delta_g + (3 - mean_r) * delta_b
    return distance
1f63d563d71373d7c27e446794745bcb2631e013
697,314
def tochr(lst):
    """Converts every value of a list into a character"""
    return [chr(i) for i in lst]
36bf8ead73aaf079cbf1f2d5f3cd243266f7fbd2
697,315
def find_pareto_front(population):
    """Finds a subset of nondominated individuals in a given list

    :param population: a list of individuals
    :return: a set of indices corresponding to nondominated individuals
    """
    pareto_front = set(range(len(population)))
    for i in range(len(population)):
        if i not in pareto_front:
            continue
        ind1 = population[i]
        for j in range(i + 1, len(population)):
            ind2 = population[j]
            # if individuals are equal on all objectives, mark one of them
            # (the first encountered one) as dominated to prevent excessive
            # growth of the Pareto front
            if ind2.fitness.dominates(ind1.fitness) or ind1.fitness == ind2.fitness:
                pareto_front.discard(i)
            if ind1.fitness.dominates(ind2.fitness):
                pareto_front.discard(j)
    return pareto_front
2e53edda5e3d3afd541cedff3bb3d247f913969c
697,316
def __walk_chain(rel_dict, src_id):
    """
    given a dict of pointing relations and a start node, this function
    will return a list of paths (each path is represented as a list of
    node IDs -- from the first node of the path to the last).

    Parameters
    ----------
    rel_dict : dict
        a dictionary mapping from an edge source node (node ID str)
        to a set of edge target nodes (node ID str)
    src_id : str

    Returns
    -------
    paths_starting_with_id : list of list of str
        each list contains a list of strings (i.e. a list of node IDs,
        which represent a chain of pointing relations)
    """
    paths_starting_with_id = []
    for target_id in rel_dict[src_id]:
        if target_id in rel_dict:
            for tail in __walk_chain(rel_dict, target_id):
                paths_starting_with_id.append([src_id] + tail)
        else:
            paths_starting_with_id.append([src_id, target_id])
    return paths_starting_with_id
24b3bea13f7d802043b595e48bedc6da2a782370
697,317
def getIndexesToParse(content: str, searchFor: str = '$'):
    """Gets the indexes of the searched character in a string content

    @type content: str
    @param content: The content to search in
    @type searchFor: str
    @param searchFor: The character whose index positions are searched in the given content
    @returns list: The index positions of the searched character
    """
    return [i for i, char in enumerate(content) if char == searchFor]
a087a69dc70467f5d8483b892132650585f9a416
697,318
from typing import Any
import importlib

def dynamic_import_from(source_file: str, class_name: str) -> Any:
    """Do a from source_file import class_name dynamically

    Args:
        source_file (str): Where to import from
        class_name (str): What to import

    Returns:
        Any: The class to be imported
    """
    module = importlib.import_module(source_file)
    return getattr(module, class_name)
90c861f727c8e6f20f89b7af24c2163bc65bd516
697,319
from bs4 import BeautifulSoup

def prettify_html(html_str):
    """
    :param html_str: HTML source string to prettify
    :return: the same HTML, re-indented by BeautifulSoup
    """
    soup = BeautifulSoup(html_str, 'html.parser')
    return soup.prettify()
fedb96a62ba041a391fe67014c67444afa8e9163
697,320
import numpy

def interleave(arr):
    """Returns the interleaved (channels-last) array.
    Example: [10000, 3, 32, 32] => [10000, 32, 32, 3].
    """
    last = arr.shape[-3]
    if arr.ndim == 4:
        b = numpy.empty((arr.shape[0],) + arr.shape[2:] + (last,), arr.dtype)
        for i in range(last):
            b[:, :, :, i] = arr[:, i, :, :]
    elif arr.ndim == 3:
        b = numpy.empty(arr.shape[1:] + (last,), arr.dtype)
        for i in range(last):
            b[:, :, i] = arr[i, :, :]
    else:
        raise ValueError("arr must be 3- or 4-dimensional.")
    return b
369d21ae13bdb5e5c85f9f91e0e7ea9999a2c146
697,321
def _update_ZS(stored_dict, this_dict) -> dict:
    """Updates stats dictionary with values from a new window result

    Parameters
    ----------
    stored_dict : dict
        Dictionary to be updated with new data
    this_dict : dict
        New data with which to update stored_dict
    """
    out_dict = stored_dict
    # loop over admin zones in this_dict
    for k in this_dict.keys():
        this_info = this_dict[k]
        try:
            stored_info = stored_dict[k]
        except KeyError:
            # if stored_dict has no info for zone k (new zone in this window),
            # set it equal to the info from this_dict
            out_dict[k] = this_info
            continue
        # calculate number of visible arable pixels for both dicts by
        # multiplying arable_pixels with percent_arable
        arable_visible_stored = (stored_info["arable_pixels"] * stored_info["percent_arable"] / 100.0)
        arable_visible_this = (this_info["arable_pixels"] * this_info["percent_arable"] / 100.0)
        try:
            # weight of stored_dict value is the ratio of its visible arable
            # pixels to the total number of visible arable pixels
            stored_weight = arable_visible_stored / (arable_visible_stored + arable_visible_this)
        except ZeroDivisionError:
            # if no visible pixels at all, weight everything at 0
            stored_weight = 0
        try:
            # weight of this_dict value is the ratio of its visible arable
            # pixels to the total number of visible arable pixels
            this_weight = arable_visible_this / (arable_visible_this + arable_visible_stored)
        except ZeroDivisionError:
            # if the total visible arable pixels are 0, everything gets weight 0
            this_weight = 0
        ## weighted mean value
        value = (stored_info['value'] * stored_weight) + (this_info['value'] * this_weight)
        ## sum of arable pixels
        arable_pixels = stored_info['arable_pixels'] + this_info['arable_pixels']
        ## directly recalculate total percent arable from sum of arable_visible divided by arable_pixels
        percent_arable = ((arable_visible_stored + arable_visible_this) / arable_pixels) * 100
        # percent_arable = (stored_info['percent_arable'] * stored_weight) + (this_info['percent_arable'] * this_weight)
        out_dict[k] = {'value': value, 'arable_pixels': arable_pixels, 'percent_arable': percent_arable}
    return out_dict
4afd294a05228e53bfa6874651d7b5d7f18f972e
697,322
def findAllPerson(tx):
    """
    Method that finds all the Person nodes in the database
    :param tx: is the transaction
    :return: a list of nodes
    """
    query = (
        "MATCH (p:Person) "
        "RETURN p , ID(p);"
    )
    results = tx.run(query).data()
    return results
c5e5af2c1267b7bc057135dc4cb64e2bb2c23198
697,323
def integral_image(image):
    """Create an integral image on the first 2 dimensions from the input.

    Args:
        image -- ndarray with ndim >= 2

    Returns an integral image where every location i,j is the cumulative
    sum of all preceding pixels.
    """
    return image.cumsum(1).cumsum(0)
567228d7089db2a58047bd5123a371c516198ab1
697,324
import numpy

def zeros(shape=()):
    """
    Returns the ``shape`` zero tensor.

    :param shape: input shape for the zero tensor
    :type shape: ``tuple`` of ``int``
    :return: array of the given shape filled with zeros
    :rtype: ``numpy.ndarray``
    """
    if len(shape) > 0:
        out = numpy.zeros(shape, numpy.float64)
    else:
        out = 0.
    return out
262e52ff70ab86abb33d94d0000dff0f59a1820b
697,325
def all_segments(n: int):
    """Generate all segment combinations"""
    return ((i, j, k)
            for i in range(n)
            for j in range(i + 2, n)
            for k in range(j + 2, n + (i > 0)))
23e6dbe5254f3d93299446a5a7bd3468fa277e78
697,326
def installed_headers_for_dep(dep):
    """Convert a cc_library label to a DrakeCc provider label.

    Given a label `dep` for a cc_library, such as would be found in the
    `deps = []` of some cc_library, returns the corresponding label for the
    matching DrakeCc provider associated with that library.  The returned
    label is appropriate to use in the deps of a `drake_installed_headers()`
    rule.

    Once our rules are better able to call native rules like
    native.cc_binary, instead of having two labels we would prefer to tack
    a DrakeCc provider onto the cc_library target directly.

    Related links from upstream:
    https://github.com/bazelbuild/bazel/issues/2163
    https://docs.bazel.build/versions/master/skylark/cookbook.html#macro-multiple-rules
    """
    suffix = ".installed_headers"
    if ":" in dep:
        # The label is already fully spelled out; just tack on our suffix.
        result = dep + suffix
    else:
        # The label is the form //foo/bar which means //foo/bar:bar.
        last_slash = dep.rindex("/")
        libname = dep[last_slash + 1:]
        result = dep + ":" + libname + suffix
    return result
a25a50fa519018fec57b49c3d66a88b20cf21777
697,327
def hex_short(value):
    """
    Given an integer value, convert it to hexadecimal with the 0x prefix removed

    :param value: the integer
    :return: the shortened hex version, zero-padded to two digits
    """
    hex_value = hex(value)[2:]
    if len(hex_value) == 1:
        hex_value = f"0{hex_value}"
    return hex_value
d7bb12b06001c44bdddfbe0c4f8d810c27862601
697,328
def storage_add_data(args):
    """Build the config file"""
    content = {
        "apiVersion": "kadalu-operator.storage/v1alpha1",
        "kind": "KadaluStorage",
        "metadata": {
            "name": args.name
        },
        "spec": {
            "type": args.type,
            "storage": []
        }
    }

    # Device details are specified
    if args.device:
        for devdata in args.device:
            node, dev = devdata.split(":")
            content["spec"]["storage"].append(
                {
                    "node": node,
                    "device": dev
                }
            )
        return content

    # Path is specified instead of Raw device
    if args.path:
        for pathdata in args.path:
            node, path = pathdata.split(":")
            content["spec"]["storage"].append(
                {
                    "node": node,
                    "path": path
                }
            )
        return content

    # If PVC is specified instead of Raw device and Path
    if args.pvc:
        for pvc in args.pvc:
            content["spec"]["storage"].append(
                {
                    "pvc": pvc
                }
            )
        return content

    # External details are specified
    if args.external:
        for voldata in args.external:
            node, vol = voldata.split(":")
            content["spec"]["storage"].append(
                {
                    "gluster_host": node,
                    "gluster_volname": vol.strip("/")
                }
            )
        return content

    return ""
3a145e64907b54e2b64980b4d25d8c8f961a4cef
697,329
def lca(T, v, w):
    """
    The lowest common ancestor (LCA) of two nodes v and w in a tree or
    directed acyclic graph (DAG) T is the lowest (i.e. deepest) node that
    has both v and w as descendants, where we define each node to be a
    descendant of itself (so if v has a direct connection from w, w is the
    lowest common ancestor).
    """
    if T is None:
        return None
    # This is the LCA
    if T.value == v or T.value == w:
        return T
    # Explore subtrees.
    left_lca = lca(T.left, v, w)
    right_lca = lca(T.right, v, w)
    if left_lca and right_lca:
        return T
    return left_lca if left_lca is not None else right_lca
1f0235c6364803aae4a28a9ad72ab433f22ef2a5
697,330
import re

def is_uuid(uuid):
    """
    Check if value is a proper uuid
    :param uuid: string to check
    :return: bool
    """
    UUID_PATTERN = re.compile(r'^[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}$', re.IGNORECASE)
    if UUID_PATTERN.match(uuid):
        return True
    return False
091464f66018c9913c19dcf307c1e193348c7023
697,331
from pathlib import Path
import json

def parse_annotation_background_width(bpmn_path: Path):
    """Get the width the image was resized to when annotating in the BPMN Annotator tool"""
    assert bpmn_path.suffix == ".bpmn", f"{bpmn_path}"
    img_meta_line = bpmn_path.read_text().split("\n")[1]
    assert img_meta_line.startswith(
        "<!--"
    ), f"{bpmn_path} has no meta line, line 1: {img_meta_line}"
    img_meta = json.loads(img_meta_line.replace("<!-- ", "").replace(" -->", ""))
    return img_meta["backgroundSize"]
aa1315aa2301a823d5e6fe83089698edd7600e4b
697,332
def parseRegionSetName(regionSetName):
    """Get region and setName from regionSetName"""
    if '.' in regionSetName:
        region, setName = regionSetName.split('.')
    else:
        region, setName = 'Assembly', regionSetName
    return region, setName
ebda9b44833e42127c7bec1169b9660137c8daa3
697,333
def flatten_object(obj, result=None):
    """
    Convert a JSON object to a flattened dictionary.

    example: { "db": { "user": "bar" }} becomes {"db.user": "bar" }
    """
    if not result:
        result = {}

    def _flatten(key_obj, name=''):
        if isinstance(key_obj, dict):
            for item in key_obj:
                arg = str(name) + str(item) + '.'
                _flatten(key_obj[item], arg)
        elif isinstance(key_obj, list):
            index = 0
            for item in key_obj:
                arg = str(name) + str(index) + '.'
                _flatten(item, arg)
                index += 1
        else:
            result[name[:-1]] = key_obj

    _flatten(obj)
    return result
3a86a9c3ed1ad4add6027207a074ba0c0ed1b3c8
697,334
def check_view_filter_and_group_by_criteria(filter_set, group_by_set):
    """Return a bool for whether a view can be used."""
    no_view_group_bys = {"project", "node"}
    # The dashboard does not show any data grouped by OpenShift cluster, node,
    # or project, so we do not have views for these group bys
    if group_by_set.intersection(no_view_group_bys) or filter_set.intersection(no_view_group_bys):
        return False
    return True
8baeeb827ba092a3b5262f076ba1fa77aaf55b5f
697,335
import torch

def convert_xywh_to_xyxy(boxes):
    """
    Parameters
    ----------
    boxes : torch.Tensor
        Tensor of shape (..., 4).

    Returns
    -------
    torch.Tensor
        Tensor of shape (..., 4).
    """
    num_dims = boxes.ndim
    x, y, w, h = torch.split(boxes, 1, dim=-1)
    xmin = x - w / 2
    ymin = y - h / 2
    xmax = x + w / 2
    ymax = y + h / 2
    boxes_xyxy = torch.cat([xmin, ymin, xmax, ymax], dim=num_dims - 1)
    return boxes_xyxy
e97e18c7d35433dc5b7ca4254c4a7d617da550da
697,336
def sentence2feature(sentences):
    """Convert sentence-form corpus into features and labels"""
    features, tags = [], []
    for index in range(len(sentences)):
        feature_list, tag_list = [], []
        for i in range(len(sentences[index])):
            feature = {"w0": sentences[index][i][0],
                       "p0": sentences[index][i][1],
                       "w-1": sentences[index][i - 1][0] if i != 0 else "BOS",
                       "w+1": sentences[index][i + 1][0] if i != len(sentences[index]) - 1 else "EOS",
                       "p-1": sentences[index][i - 1][1] if i != 0 else "un",
                       "p+1": sentences[index][i + 1][1] if i != len(sentences[index]) - 1 else "un"}
            feature["w-1:w0"] = feature["w-1"] + feature["w0"]
            feature["w0:w+1"] = feature["w0"] + feature["w+1"]
            feature["p-1:p0"] = feature["p-1"] + feature["p0"]
            feature["p0:p+1"] = feature["p0"] + feature["p+1"]
            feature["p-1:w0"] = feature["p-1"] + feature["w0"]
            feature["w0:p+1"] = feature["w0"] + feature["p+1"]
            feature_list.append(feature)
            tag_list.append(sentences[index][i][-1])
        features.append(feature_list)
        tags.append(tag_list)
    return features, tags
13dc624a1bb2fef9d03ce7a9ccf1e8e0f2d72c7b
697,337
import numpy

def inner_product(state_1, state_2):
    """Compute inner product of two states."""
    return numpy.dot(state_1.conjugate(), state_2)
37dcc39ff3b18dae95bb5959ba1afe9e5954fcf9
697,338
import numpy

def find_max_abs_stack(stack, windowstack, couplingmatrix):
    """Find the location and value of the absolute maximum in this stack

    :param stack: stack to be searched
    :param windowstack: Window for the search
    :param couplingmatrix: Coupling matrix between different scales
    :return: x, y, scale
    """
    pabsmax = 0.0
    pscale = 0
    px = 0
    py = 0
    nscales = stack.shape[0]
    assert nscales > 0
    pshape = [stack.shape[1], stack.shape[2]]
    for iscale in range(nscales):
        if windowstack is not None:
            resid = stack[iscale, :, :] * windowstack[iscale, :, :] / couplingmatrix[iscale, iscale]
        else:
            resid = stack[iscale, :, :] / couplingmatrix[iscale, iscale]
        # Find the peak in the scaled residual image
        mx, my = numpy.unravel_index(numpy.abs(resid).argmax(), pshape)
        # Is this the peak over all scales?
        thisabsmax = numpy.abs(resid[mx, my])
        if thisabsmax > pabsmax:
            px = mx
            py = my
            pscale = iscale
            pabsmax = thisabsmax
    return px, py, pscale
bde60c76f71ce9fc92c5a17436af1fddfa190dab
697,339
def ping(event):
    """Responds 'pong' to your 'ping'."""
    return 'pong'
48d07ca1e513b28212c56758c41e1757b5f75468
697,340
import functools
import operator

def _has_pattern_match(name: str, patterns) -> bool:
    """Check if name matches any of the patterns"""
    return functools.reduce(
        operator.or_,
        map(lambda r: r.search(name) is not None, patterns),
        False)
1dd6ad54ee35db20b6fcec0495e4c4ef61788aa0
697,341
import torch

def _get_zero_grad_tensor(device):
    """return a zero tensor that requires grad."""
    loss = torch.as_tensor(0.0, device=device)
    loss = loss.requires_grad_(True)
    return loss
5bf67a7ab0ff50e041fcd07484d6f5468acf6d4e
697,343
import argparse

def get_parser():
    """Return the argument parser for the test runner"""
    parser = argparse.ArgumentParser(__name__)
    parser.add_argument(metavar="label", action="append", nargs="*", dest="labels")
    return parser
0d72692f59a47660df6c2e54217a7ba8ea6bcff5
697,344
def update_borders(bb: list, roi_size: list):
    """
    Update bounding box borders according to roi_size.
    Borders are updated from center of image.

    :param bb: original bounding box
    :param roi_size: output bounding box size
    :return: modified bounding box
    """
    mid_x = bb[0] + bb[2] / 2
    mid_y = bb[1] + bb[3] / 2
    new_x = int(mid_x - roi_size[1] / 2)
    new_y = int(mid_y - roi_size[0] / 2)
    return [new_x, new_y, roi_size[0], roi_size[1]]
15574e03cc3bea0b33e5f20014bc83ea60b47bfc
697,345
import random

def random_up(sentence: str) -> str:
    """
    Randomly makes some letters lower case and some upper case

    Parameters:
        sentence(str): the sentence to mix case of the letters in

    Returns:
        str: modified sentence

    Examples (outputs are random, shown for illustration):
        >>> random_up('abc')
        'Abc'
        >>> random_up('abcdef')
        'aBcDeF'
        >>> random_up('i like python')
        'i lIKE pYThOn'
    """
    return ''.join(random.choice((str.upper, str.lower))(c) for c in sentence)
15d7300e258b54462ae51b8f70a1b8e8a59e0120
697,347
def read_File_text_0(sfile):
    """read file text to Array String / String"""
    ar = []
    sres = ''
    try:
        try:  # Python 3
            ins = open(sfile, mode="r", encoding='UTF-8')
        except:  # Python 2
            ins = open(sfile, mode="r")
        for line in ins:
            ar.append(line.replace("\n", ""))
            sres += line
        ins.close()
    except OSError as e:
        raise Exception(e.strerror)
    return ar, sres
f0752ee84a2e58fbcc6162eb2f47aac623d111d6
697,348
import json

def load_scenario(json_file_path):
    """Load scenario from JSON"""
    with open(json_file_path, 'r') as json_file:
        parsed_json = json.loads(json_file.read())
    return parsed_json
005eb73753fcbbd873e5cb73fa54142990ebc97d
697,349
def to_deg(value, loc):
    """convert decimal coordinates into degrees, minutes and seconds tuple

    Keyword arguments:
        value is float gps-value, loc is direction list ["S", "N"] or ["W", "E"]
    return: tuple like (25, 13, 48.343, 'N')
    """
    if value < 0:
        loc_value = loc[0]
    elif value > 0:
        loc_value = loc[1]
    else:
        loc_value = ""
    abs_value = abs(value)
    deg = int(abs_value)
    t1 = (abs_value - deg) * 60
    min = int(t1)
    sec = round((t1 - min) * 60, 5)
    return (deg, min, sec, loc_value)
5c2eb955bc3e05f5f8378bc0df2ed16ea7f7cf3b
697,350
def max_sub_array(nums):
    """Returns the max subarray of the given list of numbers.
    Returns 0 if nums is None or an empty list.
    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    if nums is None:
        return 0
    if len(nums) == 0:
        return 0
    max_sum = nums[0]
    curr_sum = nums[0]
    for i in range(1, len(nums)):
        curr_sum = max(curr_sum + nums[i], nums[i])
        max_sum = max(curr_sum, max_sum)
    return max_sum
729f277b61b517fe7e434812576347fe6742401c
697,351
import pathlib

def get_package_folder_name():
    """Attempt to guess the built package name."""
    cwd = pathlib.Path.cwd()
    directories = [
        path for path in cwd.iterdir()
        if pathlib.Path(cwd, path).is_dir()
        and pathlib.Path(cwd, path, '__init__.py').is_file()
        and path.name != 'test']
    assert len(directories) == 2, directories
    return directories[0].name
1ecfcbd46fbc960f37f89b3f8beb8604e83fa041
697,352
import random
import time

def delay(fn, opts, task, *args, **kwargs):
    """delay(t=5, stddev=0., pdf="gauss")
    Wraps a bound method of a task and delays its execution by *t* seconds.
    """
    if opts["stddev"] <= 0:
        t = opts["t"]
    elif opts["pdf"] == "gauss":
        t = random.gauss(opts["t"], opts["stddev"])
    elif opts["pdf"] == "uniform":
        t = random.uniform(opts["t"], opts["stddev"])
    else:
        raise ValueError("unknown delay decorator pdf '{}'".format(opts["pdf"]))
    time.sleep(t)
    return fn(task, *args, **kwargs)
66b79012d62f1a4fc6b0cb1d79a3aea68fcdc0e1
697,353
from typing import Dict
import collections

def quantity_contained(container: Dict[str, collections.Counter], color: str) -> int:
    """Quantity of bags contained."""
    counter = 0
    if color in container and container[color] is not None:
        for bag_color, quantity in container[color].items():
            counter += (
                quantity * (1 + quantity_contained(container, bag_color))
            )
    return counter
f3262a4fc2a8228432e68a86c6728c89e8ed5d18
697,354
def add_camera_args(parser):
    """Add parser arguments for camera options."""
    parser.add_argument('--video', type=str, default=None,
                        help='video file name, e.g. traffic.mp4')
    parser.add_argument('--video_looping', action='store_true',
                        help='loop around the video file [False]')
    parser.add_argument('--onboard', type=int, default=None,
                        help='Jetson onboard camera [None]')
    parser.add_argument('--copy_frame', action='store_true',
                        help='copy video frame internally [False]')
    parser.add_argument('--do_resize', action='store_true',
                        help='resize image/video [False]')
    parser.add_argument('--width', type=int, default=640,
                        help='image width [640]')
    parser.add_argument('--height', type=int, default=480,
                        help='image height [480]')
    return parser
21f74ccd2a092ba9620f40bb1a0a9c5c6dbf72a5
697,356
def process_wildcard(fractions):
    """
    Processes element with a wildcard ``?`` weight fraction and returns
    composition balanced to 1.0.
    """
    wildcard_zs = set()
    total_fraction = 0.0
    for z, fraction in fractions.items():
        if fraction == "?":
            wildcard_zs.add(z)
        else:
            total_fraction += fraction
    if not wildcard_zs:
        return fractions
    balance_fraction = (1.0 - total_fraction) / len(wildcard_zs)
    for z in wildcard_zs:
        fractions[z] = balance_fraction
    return fractions
6358bfc7ee3b7f187b375b1df2cecd8ce104674a
697,357
def cover_email(email):
    """Replaces part of the username with dots

    >>> cover_email('hello@example.com')
    'hel&hellip;@example.com'
    """
    try:
        [user, domain] = email.split('@')
    except ValueError:
        # handle invalid emails... sorta
        user = email
        domain = ''
    if len(user) <= 4:
        user_prefix = user[:1]
    elif len(user) <= 6:
        user_prefix = user[:3]
    else:
        user_prefix = user[:4]
    return f'{user_prefix}&hellip;@{domain}'
e24cb3273335504f9d0d245138752fa0b7cf53df
697,358
from pathlib import Path
import sys

def module_name() -> str:
    """Get the current module name."""
    return Path(sys.argv[0]).stem
f6ccaad8e1b32fe2447d90b277dd1749b725f865
697,359
def _convert_ketone_unit(raw_value):
    """Convert raw ketone value as read in the device to its value in mmol/L."""
    return int((raw_value + 1) / 2.) / 10.
804d34ecc9d901f3d958ebee34282885248c3499
697,361
import base64

def secret_to_bytes(secret):
    """Convert base32 encoded secret string to bytes"""
    return base64.b32decode(secret)
81720bc65fa4be6a18cf0ba461adaf7b9227a417
697,362
import requests
import json

def call(method, url, payload=None, headers={'Content-Type': 'application/json'}):
    """
    Use for calling APIs.
    Usage call('delete', 'http://localhost:8888/delete')
    """
    return getattr(requests, method)(url, data=json.dumps(payload), headers=headers).json()
5f03eab3ad4b125feeaae4166e1188f9e166a688
697,364
def create_csv_header():
    """Creates a CSV header row for dialogue upload to Crowdflower."""
    return 'cid,code,code_gold'
b0cb1b6a2b8483fe4324431099e194c6ecbeb657
697,365
import numpy as np
import logging

def run_statistics(df, feature):
    """Takes DataFrame and calculates summary statistics for the experiment.

    The data must contain the 'Status' column, defining each row as 'Sample',
    'Positive' or 'Negative' control, or 'Reference'. 'Reference' wells are
    excluded from the analysis.
    """
    logging.basicConfig(level=logging.INFO)
    st = None
    df = df[df.Status != 'Reference'][[feature, 'Status']]
    st = df.groupby(['Status']).agg([np.size, np.mean, np.std, np.var])
    st.columns = st.columns.droplevel()
    st['Feature'] = feature
    if 'Positive' in df.Status.unique() and 'Negative' in df.Status.unique():
        st['Z_factor'] = 1 - 3 * (st.at['Positive', 'std'] + st.at['Negative', 'std']) / abs(st.at['Positive', 'mean'] - st.at['Negative', 'mean'])
        st['SB'] = st.at['Positive', 'mean'] / st.at['Negative', 'mean']
        st = st.reset_index()[['Feature', 'Status', 'size', 'mean', 'std', 'var', 'Z_factor', 'SB']]
    else:
        logging.info('run_statistics: Failed to calculate Z factor. Positive or Negative control is missing.\n')
        st = st.reset_index()[['Feature', 'Status', 'size', 'mean', 'std', 'var']]
    return st
276a429774e8a285ad784a3aa57320fda93fb1a6
697,366
def get_longest_consecutive_sequence(array: list):
    """given array return longest consecutive sequence length

    the following algorithm provides the length in O(N) time
    """
    if len(array) < 1:
        return 0
    hashmap = {number: True for number in array}
    longest_consecutive = 1  # array must have a value
    while hashmap:
        key = next(iter(hashmap.keys()))
        distance_from_key = 1
        current_consecutive = 1
        # check descending consecutive integers
        while hashmap.get(key - distance_from_key, False):
            del hashmap[key - distance_from_key]
            current_consecutive += 1
            distance_from_key += 1
        distance_from_key = 1
        # check ascending consecutive integers
        while hashmap.get(key + distance_from_key):
            del hashmap[key + distance_from_key]
            current_consecutive += 1
            distance_from_key += 1
        if current_consecutive > longest_consecutive:
            longest_consecutive = current_consecutive
        del hashmap[key]
    return longest_consecutive
4e51defc98c7e899263b7519b1f3f85c190fa43a
697,367
import click
from typing import Iterator

def extra_context_callback(
    context: click.Context, parameter: click.Parameter, args: tuple[str, ...]
) -> dict[str, str]:
    """Callback for the EXTRA_CONTEXT argument."""
    def _generate() -> Iterator[tuple[str, str]]:
        for arg in args:
            try:
                key, value = arg.split("=", 1)
                yield key, value
            except ValueError:
                raise click.BadParameter(
                    "EXTRA_CONTEXT should contain items of the form key=value; "
                    f"'{arg}' doesn't match that form"
                )
    return dict(_generate())
656b2bb6a54c302ddeb63c82d238e4cbe304de70
697,369
def find_constraints(mol, sites):
    """
    Find the NwChem constraints for a selection of sites on a molecule.
    """
    # Find the corresponding atom numbers in string format
    site_numbers = []
    for i in range(len(mol.sites)):
        if mol.sites[i] in sites:
            site_numbers.append(str(i + 1) + ' ')
    site_numbers = ''.join(site_numbers)
    # Set up the constraints on the atoms
    return {'fix atom': site_numbers}
978bef23a58ec670e4bf002b16ebca580acb5663
697,370
def decomp_proc(values):
    """
    Given the row of predictions for the decomposition mode test output,
    remove the pointless trailing zeroes from the time

    :param values: A row of the decomposition predictions .csv, split by commas into a list
    :return: Same row with pointless zeros after the time removed
    """
    time = values[1]
    times = time.split(".")
    assert (len(times) == 2)
    assert (times[1] == "000000")
    values[1] = times[0]
    return values
6898dc6586ffed0b16dbde1159861eccc63206c5
697,371
def _non_adjacent_filter(self, cmd, qubit_graph, flip=False):
    """A ProjectQ filter to identify when swaps are needed on a graph

    This flags any gates that act on two non-adjacent qubits with respect
    to the qubit_graph that has been given

    Args:
        self(Dummy): Dummy parameter to meet function specification.
        cmd(projectq.command): Command to be checked for decomposition into
            additional swap gates.
        qubit_graph(Graph): Graph object specifying connectivity of qubits.
            The values of the nodes of this graph are unique qubit ids.
        flip(Bool): Flip for switching if identifying a gate is in this
            class by true or false.  Designed to meet the specification of
            ProjectQ InstructionFilter and DecompositionRule with one
            function.

    Returns:
        bool: When flip is False, this returns True when a 2 qubit command
            acts on non-adjacent qubits or when it acts only on a single
            qubit.  This is reversed when flip is used.
    """
    if qubit_graph is None:
        return True ^ flip
    total_qubits = (cmd.control_qubits +
                    [item for qureg in cmd.qubits for item in qureg])
    # Check for non-connected gate on 2 qubits
    if ((len(total_qubits) == 1) or
            (len(total_qubits) == 2 and
             qubit_graph.is_adjacent(
                 qubit_graph.find_index(total_qubits[0].id),
                 qubit_graph.find_index(total_qubits[1].id)))):
        return True ^ flip
    return False ^ flip
9d3a55341c2a1410c5c1864ce5fcd6ea177d4026
697,373
def str2bool(str):
    """Tells whether a string is a Yes (True) or a No (False)"""
    if str.lower() in ["y", "yes"]:
        return True
    elif str.lower() in ["n", "no"]:
        return False
    else:
        raise Exception("Please enter Yes or No")
ba3f8a2fdca089ae5cdc608a04091009a57a7662
697,374
import argparse

def build_parser(parser=None, **argparse_options):
    """
    Args:
        parser (argparse.ArgumentParser):
        **argparse_options (dict):

    Returns:
        argparse.ArgumentParser: the configured parser
    """
    if parser is None:
        parser = argparse.ArgumentParser(
            **argparse_options,
            description="""Search files recursively starting at 'base_directory'
            and count the different codecs found"""
        )
    parser.add_argument('-i', '--input',
                        nargs='+',
                        required=True,
                        dest='l_in_media_fp')
    parser.add_argument('-o', '--output',
                        required=True,
                        dest='out_media_fp')
    return parser
4122b7513f93bb68f20d7f318b3d2eb5460a3539
697,375
def body_title(entry: str):
    """Choose what is body and what is title.

    Arguments:
        entry {str} -- entry to decide on
    """
    entry = entry[17:]
    if "\n" in entry:
        split = entry.split("\n")
    else:
        split = entry.split()
    if split:
        title = split[0]
        split.pop(0)
        body = " ".join(split)
    else:
        title, body = "", ""
    return title, str(body)
c90ae8480be3c5670ec9016cf6a68fc3cf6601a3
697,376
import re

def re_group(string: str) -> str:
    """
    >>> re_group('..12345678910111213141516171820212223')
    '1'
    """
    match = re.search(r'([a-zA-Z0-9])\1+', string)
    return match.group(1) if match else '-1'
d7e75a1af62f3266d789fda52ce49e9ff99b17b0
697,377
def plusOne(digits):
    """
    :type digits: List[int]
    :rtype: List[int]
    """
    # join digits into a number, add one, then split back into int digits
    return [int(c) for c in str(int("".join(str(e) for e in digits)) + 1)]
cbdfdc3acd57a89af1a96e16acd7a3976c74aa4c
697,378
def _make_value_divisible(value, factor, min_value=None):
    """
    It ensures that all layers have a channel number that is divisible by 8

    :param value: value to process
    :param factor: divisor
    :param min_value: new value always greater than the min_value
    :return: new value
    """
    if min_value is None:
        min_value = factor
    new_value = max(int(value + factor / 2) // factor * factor, min_value)
    if new_value < value * 0.9:
        new_value += factor
    return new_value
89561ca1551b988030b3d0b4fdd5d17f5664607f
697,380
def contig_count(contig):
    """Return a count of contigs from a fasta file"""
    with open(contig, 'r') as f:
        return sum(1 for line in f if line.startswith('>'))
dde6bbcf5799dbea197c2b0f391bf26e9ac960b6
697,381
from pathlib import Path

def contour(anchor):
    """contour(anchor) -> Path

    Returns a Path object representing the contour starting with the
    Dart anchor.  (syntactic sugar for 'Path(anchor.phiOrbit())')"""
    return Path(anchor.phiOrbit())
c2fee570e3feb753439c5af941b0658e2aa49bbc
697,382
import sys
import traceback

def import_class(import_str):
    """
    Returns a class from a string that specifies a module and/or class

    :param import_str: import path, e.g. 'httplib.HTTPConnection'
    :returns: imported object
    :raises: ImportError if the class does not exist or the path is invalid
    """
    (mod_str, separator, class_str) = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))
950745dd8e71820099d1fc7d002cd485652ad7c9
697,383
def tcl(s, *args):
    """tcl(s, *args) -> str.

    Run a tcl command.  The arguments must be strings and are passed to the
    command.  If no arguments are given and the command has whitespace in it
    then it is instead interpreted as a tcl program (this is deprecated).

    @param s: TCL code.
    @param args: The arguments to pass in to the TCL code.
    @return: Result of TCL command as string.
    """
    return ''
3493531fa272dd91b61783d67fd5b91ff928158b
697,384
def HPX_grid_size(Nside):
    """Return the size of the pixel grid (Nx, Ny) for a given Nside"""
    Nx = 8 * Nside
    Ny = 4 * Nside + 1
    return Nx, Ny
8e267c467ed52ef24b1d540cff9aeac84a1e1bb4
697,385
def chk_typ(ty):
    """
    const __global char *__private
    __private int
    void
    __global char *__private
    __local int *__private
    const __private int
    __global float *const __private
    """
    global id2obj, name2obj, rec2obj, td2obj
    if 'attribute' in ty:
        _ = 2 + 2
        return
    is_pointer = False
    tl = ty.split()
    if tl[-1] == '__attribute__((cdecl))':
        tl = tl[:-1]
    td = {}
    i0 = 0
    while tl[i0] in ('const', '__global', '__local', '__private'):
        i0 += 1
    i = i0  # i0 = i = 1 if tl[0] == 'const' else 0
    while i < len(tl) and tl[i].isidentifier():
        td[tl[i]] = td.get(tl[i], 0) + 1
        i += 1
    while i < len(tl) and tl[i] in ('*', '*const', '*__private', '*restrict', '*volatile', '**', '**restrict', '(*)[11]', '(*)[n]', '(*)[20]', '(*)[50]') + ('***', '*[1]', '[]', '[8]', '[9]', '[12]', '[24]', '[50]', '[64]', '[101]', '[101][101]', '[101][102]', '[102][102]'):
        is_pointer = True
        i += 1
    if i < len(tl):
        assert tl[i].startswith('(*)('), ty  # (*)(void), (*)(const void *, const void *)
        is_pointer = True
    #
    if 'struct' in td:
        assert 'union' not in td and 'struct' == tl[i0], tl
        sn = tl[i0 + 1]
        if sn not in name2obj:
            assert is_pointer
            return 'P'
        snd = name2obj[sn]
        assert snd['kind'] == 'RecordDecl' and snd['tagUsed'] == 'struct', tl
        assert len(tl) in (i0 + 2, i0 + 3), tl
        if len(tl) == i0 + 3:
            assert tl[-1] in ('*', '*restrict', '**'), tl
            return 'P'
        else:
            sz = ty_rec_sz(snd)
            return 'X' if sz <= 8 else 'Y'
    elif 'union' in td:
        assert 'union' == tl[i0], tl
        sn = tl[i0 + 1]
        snd = name2obj[sn]
        assert snd['kind'] == 'RecordDecl' and snd['tagUsed'] == 'union', tl
        assert False, ty
    else:
        _ = 2 + 2
adc5975b0f2713f6cf8dfe541beffa6939d24ca5
697,386
def email_signature():
    """Gets the signature for the emails"""
    signature = ("Regards,\n\n"
                 "The PhysioNet Team,\n"
                 "MIT Laboratory for Computational Physiology,\n"
                 "Institute for Medical Engineering and Science,\n"
                 "MIT, E25-505 77 Massachusetts Ave. Cambridge, MA 02139")
    return signature
4e70285c3b9b9d4aab916205a5d02b2738387e66
697,387
def _get_percent(text):
    """If text is formatted like '33.2%', remove the percent sign and
    convert to a float.  Otherwise, just convert to a float.
    """
    if not text:
        return None
    if text.endswith('%'):
        text = text[:-1]
    return float(text.strip())
2975f0a603a113bf7991753250a83be4da363070
697,388
def contig_to_array(bw, chrm, res=None):
    """Convert single basepair bigwig information to a numpy array

    Args:
        bw - a pyBigWig object
        chrm - name of chromosome you want
        res - resolution you want data at in bp.

    Returns:
        outarray - numpy array at specified resolution
    """
    chrm_length = bw.chroms(chrm)
    # makes an array at 1 bp resolution
    out_array = bw.values(chrm, 0, chrm_length, numpy=True)
    if res:
        out_array = out_array[::res]
    return out_array
f09fb80ef5a073bf87f82b0d86c26153db404f4f
697,389
def generic_method(method):
    """Marks method as generic."""
    method._is_generic = True
    return method
9cc3cb91d61ef013385b345cc377d17bc7c8100c
697,390
def clean_empty_keyvalues_from_dict(d):
    """
    Cleans all key value pairs from the object that have empty values,
    like [], {} and ''.

    Arguments:
        d {object} -- The object to be sent to metax. (might have empty values)

    Returns:
        object -- Object without the empty values.
    """
    if not isinstance(d, (dict, list)):
        return d
    if isinstance(d, list):
        return [v for v in (clean_empty_keyvalues_from_dict(v) for v in d) if v]
    return {k: v for k, v in ((k, clean_empty_keyvalues_from_dict(v)) for k, v in d.items()) if v}
8769e5ceda55588a136fdf131f19aea4c5f06f95
697,391
import random

def session_id_generator(size=8):
    """Generating session id"""
    s = "0123456789ABCDEF"
    return "".join(random.sample(s, size))
b57ca24f6cc08cb465562f09495c99f25d72d296
697,392
def get_commands_if_mode_change(proposed, existing, group, mode):
    """Gets commands required to modify mode of a port-channel

    Note: requires removing existing config and re-config'ing
    new port-channel with new mode

    Args:
        existing (dict): existing config as defined in nxos_portchannel
        proposed (dict): proposed config as defined in nxos_portchannel
        group (str): port-channel group number
        mode (str): on, active, or passive

    Returns:
        list: ordered list of cmds to be sent to device for a change in mode

    Note: Specific for Ansible module(s).  Not to be called otherwise.
    """
    proposed_members = proposed['members']
    existing_members = existing['members']
    members_to_remove = set(existing_members).difference(proposed_members)
    members_to_remove_list = list(members_to_remove)
    members_dict = existing['members_detail']
    members_with_mode_change = []
    for interface, values in members_dict.items():
        if interface in proposed_members \
                and (interface not in members_to_remove_list):
            # Could probably make an assumption after checking one instead
            if values['mode'] != mode:
                members_with_mode_change.append(interface)
    commands = []
    if members_with_mode_change:
        for member in members_with_mode_change:
            commands.append('interface ' + member)
            commands.append('no channel-group ' + group)
        for member in members_with_mode_change:
            commands.append('interface ' + member)
            commands.append('channel-group {0} mode {1}'.format(group, mode))
    return commands
f2aa15cd7db621925d57f0a4f6a80cb5cbe4a664
697,394
def normalize_metric(metric):
    """Normalize the monitoring return value, keeping two decimal places."""
    return float("%.2f" % float(metric))
f4fb0e6f83be53a9c274dde67e525406ea1a3f53
697,395
def get_timesteps_per_episode(env):
    """Returns a best guess as to the maximum number of steps allowed
    in a given Gym environment"""
    if hasattr(env, "_max_episode_steps"):
        return env._max_episode_steps
    if hasattr(env, "spec"):
        return env.spec.tags.get("wrapper_config.TimeLimit.max_episode_steps")
    if hasattr(env, "env"):
        return get_timesteps_per_episode(env.env)
    return None
40df292632c47b77047f99b51ff5373b9ec64c33
697,396
def _resource_name_package(name):
    """
    pkg/typeName -> pkg, typeName -> None

    :param name: package resource name, e.g. 'std_msgs/String', ``str``
    :returns: package name of resource, ``str``
    """
    if '/' not in name:
        return None
    return name[:name.find('/')]
f434aa1fcfd18797625ca63c7f121dce020f8585
697,398
def state_dict_cpu_copy(chkpt):
    """save cpu copy of model state, so it can be reloaded by any device"""
    # if chkpt has things other than state_dict, get the state_dict
    if 'state_dict' in chkpt:
        state_dict = chkpt['state_dict']
    else:
        state_dict = chkpt
    for n, p in state_dict.items():
        state_dict[n] = p.cpu()
    return chkpt
a0b5cdeeab3255972be2536bcd2472401f48ec71
697,399
import importlib

def get_versio_versioning_scheme(full_class_path):
    """Return a class based on its full path"""
    module_path = '.'.join(full_class_path.split('.')[0:-1])
    class_name = full_class_path.split('.')[-1]
    try:
        module = importlib.import_module(module_path)
    except ImportError:
        raise RuntimeError('Invalid specified Versio schema {}'.format(full_class_path))
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise RuntimeError('Could not find Versio schema class {!r} inside {!r} module.'.format(class_name, module_path))
bac16d14ba32c2445acaaf7605e288a62606177b
697,401
def check_svc(tochecks) -> bool:
    """Check the declaration rules for an svc.

    list: [("ts", "TCP", 8080), ...], where "ts" means the torch serving
    framework is used
    """
    if not isinstance(tochecks, list):
        return False
    for pair in tochecks:
        name, protocol, port = pair
        if not all([isinstance(name, str),
                    protocol.upper() in ["TCP", "UDP", "HTTP"],
                    isinstance(port, int)]):
            return False
    return True
b57afc9ae34be5d14037986c22054b7d4086d1ba
697,402
def hours2days(period):
    """
    uses a tuple to return multiple values.

    Write an hours2days function that takes one argument, an integer, that
    is a time period in hours.  The function should return a tuple of how
    long that period is in days and hours, with hours being the remainder
    that can't be expressed in days.  For example, 39 hours is 1 day and 15
    hours, so the function should return (1, 15).

    These examples demonstrate how the function can be used:
    hours2days(24)  # 24 hours is one day and zero hours
    (1, 0)
    hours2days(25)  # 25 hours is one day and one hour
    (1, 1)
    hours2days(10000)
    (416, 16)
    """
    hours_of_day = 24
    day = period // hours_of_day
    hours = period % hours_of_day
    return day, hours
2198538b30ae9e0b1b1c1ec6f9f7f76ac393cafb
697,403
import csv
import random

def get_tweet(tweets_file, excluded_tweets=None):
    """Get tweet to post from CSV file"""
    with open(tweets_file) as csvfile:
        reader = csv.DictReader(csvfile)
        possible_tweets = [row["tweet"] for row in reader]
    if excluded_tweets:
        recent_tweets = [status_object.text for status_object in excluded_tweets]
        possible_tweets = [tweet for tweet in possible_tweets if tweet not in recent_tweets]
    selected_tweet = random.choice(possible_tweets)
    return selected_tweet
0734a41413277d0af66acffb55716e396a4ff1bc
697,404
def filter_manifest_definition(manifest_definition, name_filter):
    """
    Filters the manifest to only include functions that partially match the specified filter.

    :param manifest_definition: Dictionary of the manifest
    :param name_filter: A function name specified in the manifest
    :return: Filtered manifest definition
    """
    manifest_definition['functions'] = {key: value
                                        for (key, value) in manifest_definition['functions'].items()
                                        if name_filter in key.lower()}
    return manifest_definition
a971fab368aa6339850b785cb752ca00e09f7e5a
697,406
def top_ranked_final_primers(filter_merged_df):
    """
    Drops duplicate sequence ids and keeps the first, which also corresponds
    to the highest-ranking primer pair for each sample.

    Args:
        filter_merged_df (DataFrame): input from filter_merged, where primers
            are only equal to on-target primers from initial primer generation.

    Returns:
        top_ranked_df (DataFrame): outputs only the highest scoring primer
            pair at each position
    """
    top_ranked_df = filter_merged_df.drop_duplicates('Sequence ID', keep='first')
    return top_ranked_df
03f682a03d6454c3b142231fe78d82208117cf09
697,407
def gff3plsorting(gff_file, outgff_file):
    """Workflow template: sort a GFF3 file with gff3sort.pl"""
    inputs = [gff_file]
    outputs = [outgff_file]
    options = {
        'cores': 1,
        'memory': '4g',
        'account': 'NChain',
        'walltime': '01:00:00'
    }
    spec = '''
    /home/marnit/bin/gff3sort.pl --chr_order natural {infile} > {outfile}
    '''.format(infile=gff_file, outfile=outgff_file)
    return inputs, outputs, options, spec
f2a07d66dd338aad4f12c29d22205bbdee3d20e3
697,408
def words_sent(message_container, start_date, end_date):
    """
    Return number of words sent between start and end date contained by
    message container (chat/member).
    """
    words_sent = 0
    for message in message_container.messages:
        if (start_date <= message.timestamp.date() <= end_date
                and message.type == 'text'):
            words_sent += len(message.words())
    return words_sent
c8e466a8d9c1049a6b5066fe0c9efccbbd3ae245
697,409
def field_lookup(field_string):
    """Attempts to find relevant error field for uniqueness constraint error,
    given SQL error message; broken off from translate_message logic
    """
    output_format = "{0}; {0}"
    bad_field = 'code'  # assumes this field as a default
    if field_string.startswith('uq_t_'):
        bad_field = 'title'
    elif field_string.endswith('email'):
        bad_field = 'email'
    elif field_string.endswith('title'):
        bad_field = 'title'
    return output_format.format(bad_field)
782480983e69ca975f80b5df4c16822a6ab41458
697,410
import math

def matrix_from_quaternion(quaternion):
    """Calculates a rotation matrix from quaternion coefficients.

    Parameters
    ----------
    quaternion : [float, float, float, float]
        Four numbers that represent the four coefficient values of a quaternion.

    Returns
    -------
    list[list[float]]
        The 4x4 transformation matrix representing a rotation.

    Raises
    ------
    ValueError
        If quaternion is invalid.

    Examples
    --------
    >>> q1 = [0.945, -0.021, -0.125, 0.303]
    >>> R = matrix_from_quaternion(q1)
    >>> q2 = quaternion_from_matrix(R)
    >>> allclose(q1, q2, tol=1e-03)
    True
    """
    sqrt = math.sqrt
    q = quaternion
    n = q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2  # dot product
    # perhaps this should not be hard-coded?
    eps = 1.0e-15
    if n < eps:
        raise ValueError("Invalid quaternion, dot product must be != 0.")
    q = [v * sqrt(2.0 / n) for v in q]
    q = [[q[i] * q[j] for i in range(4)] for j in range(4)]  # outer_product
    rotation = [
        [1.0 - q[2][2] - q[3][3], q[1][2] - q[3][0], q[1][3] + q[2][0], 0.0],
        [q[1][2] + q[3][0], 1.0 - q[1][1] - q[3][3], q[2][3] - q[1][0], 0.0],
        [q[1][3] - q[2][0], q[2][3] + q[1][0], 1.0 - q[1][1] - q[2][2], 0.0],
        [0.0, 0.0, 0.0, 1.0]]
    return rotation
57a38e4214c94e54e2cfb2fd71e1bfa57d4f714b
697,413
from typing import Iterable

def create_item_name(item_type: str, item_names: Iterable[str]) -> str:
    """Translates an item with a type into a name.

    For instance, if there are two items of type 'POST', the first will be
    named 'POST0' and the second will be 'POST1'

    Args:
        item_type: Type of item
        item_names: Names of current items

    Returns:
        Translated item name
    """
    name_index = len([name for name in item_names if name.startswith(item_type)])
    return f"{item_type}{name_index}"
8ec37473ee0a3dc880dc962b9f1c622c2dfd9af3
697,414
import requests

def get_instance_id():
    """Check instance metadata for an instance id"""
    r = requests.get("http://169.254.169.254/latest/meta-data/instance-id")
    return r.text
16a1104877e47f310c119b43eb52eab6dbeba104
697,416
def hex2str(data):
    """
    :param data: '123'
    :return: '303132'
    """
    return ''.join('{:02X}'.format(ord(c)) for c in data)
35e046e820251c88867d33e7cccdb6b77c62a43d
697,417
import numpy

def batchify(data, bsz):
    """Transform data into batches."""
    numpy.random.shuffle(data)
    batched_data = []
    for i in range(len(data)):
        if i % bsz == 0:
            batched_data.append([data[i]])
        else:
            batched_data[len(batched_data) - 1].append(data[i])
    return batched_data
2f91d8ff2d68f99b5f32f12fa6df49a1fead2fbc
697,418
import requests
from bs4 import BeautifulSoup

def fetch_top_list(url, tag, attrs=None):
    """
    use a link to fetch a list of image links

    :param url: page URL to fetch
    :param tag: HTML tag to collect
    :param attrs: attribute filter passed to find_all
    :return: list of matching tags
    """
    state_page = requests.get(url)
    soup = BeautifulSoup(state_page.content, 'html.parser')
    return soup.find_all(tag, attrs=attrs)
9f1fc3588fa978642a6b30722c42123977f18e6b
697,419
def process_group(grp):
    """
    Given a list of list of tokens, of the form "A B ... C (contains x, y, z)"
    where A, B, C are strings of ingredients, and x, y, z are allergens,
    determine the count of the ingredients that definitely do not contain
    any allergens.

    :param grp: A list of list of tokens.
    :return: A count of the occurrences of "good" (allergen-free) ingredients.
    """
    def parse_line(s):
        """
        Given a single token of the form "A B ... C (contains x, y, z)"
        parse it and return a set of ingredients and its corresponding
        allergens.

        :param s: A string representing token.
        :return a, b: A tuple of sets representing the ingredients and allergens.
        """
        s = s.replace(" (", " ").replace(", ", " ").replace(")", " ")
        ingredients, allergens = s.split(" contains ")
        a = set(ingredients.split(" "))
        b = set(allergens.split(" ")) - {""}
        return a, b

    aller_to_ing = dict()
    computed = []
    for idx, line in enumerate(grp):
        ing, aller = parse_line(line)
        computed.append((set(x for x in ing), set(a for a in aller)))
        for a in aller:
            if a not in aller_to_ing:
                aller_to_ing[a] = {x for x in ing}
            else:
                aller_to_ing[a] &= ing
    # Potentially bad ingredients.
    potential = set()
    for x in aller_to_ing.values():
        potential |= x
    counter = 0
    for ing, aller in computed:
        for x in ing:
            if x not in potential:
                counter += 1
    return counter
001ca014e191baf570be9b4d604756dc3bdbeff1
697,420
def get_installed_tool_shed_repository(app, id):
    """Get a tool shed repository record from the Galaxy database defined by the id."""
    return app.install_model.context.query(app.install_model.ToolShedRepository) \
        .get(app.security.decode_id(id))
d76c3af3f159f9a0882a6b27951a2e8060f4bc28
697,421