content: string (39 to 14.9k characters)
sha1: string (40 characters)
id: int64 (0 to 710k)
def is_suffix_of(suffix, trace) -> bool: """ Args: suffix: target suffix trace: trace in question Returns: True if suffix is the suffix of trace. """ if len(trace) < len(suffix): return False else: return trace[-len(suffix):] == suffix
aee62450ac20847dece9debc189cf2948f45a0c0
48,425
def edit_distance(s: str, t: str): """ Return the edit distance between the strings s and t. The edit distance is the sum of the numbers of insertions, deletions, and mismatches that is minimally necessary to transform one string into the other. """ m = len(s) # index i n = len(t) # index j costs = list(range(m + 1)) for j in range(1, n + 1): prev = costs[0] costs[0] += 1 for i in range(1, m + 1): match = int(s[i - 1] == t[j - 1]) c = min( prev + 1 - match, costs[i] + 1, costs[i - 1] + 1, ) prev = costs[i] costs[i] = c return costs[-1]
9c9feaf097b5297370f335bea158be300741792d
48,427
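Illustrative check of the edit_distance snippet above (not part of the dataset row): transforming "kitten" into "sitting" takes two substitutions (k -> s, e -> i) and one insertion (g), so edit_distance("kitten", "sitting") should return 3, and edit_distance(s, s) returns 0 for any string s.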
def getAttrText(elem,attr): """ Return text value from a named attribute of the supplied element node. """ return elem.getAttribute(attr)
9229451a2874b17d365ed02fd5b5f841ea920256
48,428
import requests def get_page(url): """Gets HTML for a specific page.""" try: r = requests.get(url) return r.text except requests.exceptions.MissingSchema as _: print(f"Incorrect url missing schema, skipping {url}") return ""
90a2a37195bf8be09a93905e4869d45993c4d9ab
48,429
def generate_annotation(x, y, text, color='black', fsize=12, textangle=0, xref='paper', yref='paper', align='left'): """ Generates a plotly annotation in the form of dictionary. It can be directly be appended to the layout.annotations. :param x: x coordinate in plot :param y: y coordinate in plot :param text: text content :param color: color of text :param fsize: font size :param textangle: angle of text :param xref: reference to 'paper'/canvas or 'plot'/axis coordinates, more details in `plotly` documentation :param yref: reference to 'paper'/canvas or 'plot'/axis coordinate, more details in `plotly` documentation :return: The annotation object to append in the `layout` of the figure. """ res = dict( y=y, x=x, showarrow=False, textangle=textangle, text=text, font=dict( color=color, size=fsize ), align = align, xref=xref, yref=yref ) return res
32465c1dd7d5aee55107b468beb27be6cbf313c5
48,431
def filterDataFrameByValue(df,column,argument): """ Returns a subset of a GeoPandas GeoDataframe Currently only works for single instances of categorical variables. For more complicated cases, either code directly or update this function Args: df (GeoPandas DataFrame): The dataframe to be filtered column (str): The string name of the dataframe column to be filtered argument(var): The value determining which rows to return Returns: filteredDF (GeoPandas DataFrame): A filtered copy of the original df Raises: None Tests: None """ filteredDF = df[df[column]==argument] return filteredDF
a402fcc7b94866641d92636c545bf29712191547
48,433
def has_main_loop(f):
    """Simple test to see if a py-file has a function called 'main_loop'."""
    if not f.lower().endswith(".py"):
        return False
    try:
        with open(f) as descriptor:
            for line in descriptor:
                if line.startswith("def main_loop():"):
                    return True
        return False
    except Exception:
        # Unreadable or undecodable files are treated as not having a main_loop.
        return False
52ba03e94b686c952a81e1dc5b8761abc1326a64
48,442
from collections import OrderedDict


def _sort_values_by_step(vals_dict):
    """Sorts values dictionary by step

    Args:
        vals_dict (dict): a values dictionary as created by extract_values

    Returns:
        dict: values dictionary with sorted steps
    """
    for k, v in vals_dict.items():
        vals_dict[k] = OrderedDict(sorted(v.items()))
    return vals_dict
087232b8766b420552d1b0adaae27471ba3e8d6c
48,453
from typing import Tuple from typing import Optional def split_package(name: str) -> Tuple[Optional[str], str]: """ Return (package, name) given a fully qualified module name package is ``None`` for toplevel modules """ if not isinstance(name, str): raise TypeError(f"Expected 'str', got instance of {type(name)!r}") if not name: raise ValueError(f"Invalid module name {name!r}") name_abs = name.lstrip(".") dots = len(name) - len(name_abs) if not name_abs or ".." in name_abs: raise ValueError(f"Invalid module name {name!r}") package, _, name = name_abs.rpartition(".") if dots: package = ("." * dots) + package return (package if package != "" else None), name
d46eaf6960a238a9aa9cc600202860c3a965ae01
48,454
import torch def rand_mat_list_like_parameters(problem, V): """Create list of random matrix with same trailing dimensions as parameters.""" return [ torch.rand(V, *p.shape, device=problem.device) for p in problem.model.parameters() ]
7aa912060339b7512371f266f63dd8bd659c898c
48,457
def partition(lis, predicate): """ Splits a list into two lists based on a predicate. The first list will contain all elements of the provided list where predicate is true, and the second list will contain the rest """ as_list = list(lis) true_list = [] false_list = [] for l in as_list: pred_value = predicate(l) if pred_value is True: true_list.append(l) elif pred_value is False: false_list.append(l) else: raise Exception("Invalid predicate") return true_list, false_list
0d1cdb7e410ccce46c02209df83d16d4f9823a2d
48,468
def de_digits_for_type(maxvalue, base):
    """Find the max number of digits needed to represent a value in a given base

    Keyword arguments:
    maxvalue -- the maximum decimal value possible for the data type
    base -- the base to which the values would be converted"""
    digits = 0
    while maxvalue > 0:
        digits = digits + 1
        # Use integer (floor) division so very large values are not routed
        # through floating point, which would lose precision.
        maxvalue = maxvalue // base
    return digits
70961e19823fbde1c82589903f3f6f81668e5e04
48,469
def modified_T(t, delta, opts): """ Computes the modified nonlinear strength for use with Chromaticity correction. See Webb et al. "Chromatic and Dispersive Effects in Nonlinear Integrable Optics" for a discussion of the requirements for the transformation of nonlinear strength parameter t to be applied in the calculation of the adjusted invariant. Arguments: t (float): The strength parameter for the nonlinear magnet delta (float): The relative momentum deviation of the particle. opts (options.Options): A Synergia options instance specifying the needed phase advance quanties Returns: t/correction (float): A corrected effective strength for the nonlinear magnet """ mu0 = opts.full_tune nu0 = opts.tune Ch = opts.Ch correction = 1. - (mu0*delta*Ch/nu0) return t/correction
77ad9fa5323c94821ded243ac54e112e3ff03702
48,477
def rescale(volume, min, max): """Rescale the values of a volume between min and max.""" factor = float(max - min) / float(volume.max() - volume.min()) return ((volume - volume.min()) * factor) + min
d15ecf01591f90daf9d196cf3beb31295eccaaa2
48,478
def is_linear(reg_type): """ Checks whether a regression type is linear. """ return reg_type == "linear"
06dde95cc412879623e3a2415f7acc533a5117b2
48,480
from collections import OrderedDict


def process_odin_args(args):
    """
    Finds arguments needed in the ODIN stage of the flow
    """
    odin_args = OrderedDict()
    odin_args["adder_type"] = args.adder_type

    if args.adder_cin_global:
        odin_args["adder_cin_global"] = True

    if args.disable_odin_xml:
        odin_args["disable_odin_xml"] = True

    if args.use_odin_simulation:
        odin_args["use_odin_simulation"] = True

    return odin_args
92a50c84be6d16f966323d02a9db8d5f32a04c2e
48,483
def format_message_response(params): """ Format automatic response |params| is None if the system can't process the user's message or is not confident enough to give a response. Otherwise, |params| is a triple that consists of the question that the system is trying to answer, the response it has for that question, and the recommended command to run. Return the automatic response that will be sent back to the user's chat box. """ if params is None: return 'Thank you for your question. Our staff will get back to you as soon as we can.' else: question, response, command = params result = 'This is the question we are trying to answer: ' + question + '\n' result += response + '\n' result += 'You can try to run the following command: \n' result += command return result
13073c711a8d5ec5031a7712822f0ae58beef84f
48,486
def _split_image(image, axis='Horizontal'):
    """Splits an image into two halves and returns each half.

    Parameters
    ----------
    image : np.ndarray
        Image to split in half.
    axis : string (default = 'Horizontal')
        Which axis to split the image. If 'Horizontal', upper and lower
        halves of the specified image are returned. If 'Vertical', left
        and right halves of the specified image are returned.

    Returns
    -------
    half1, half2 : np.ndarrays of type np.uint8
        Image halves, either upper and lower or left and right.
    """
    nrows, ncols = image.shape
    if axis == 'Horizontal':
        # Slice indices must be integers in Python 3, so use floor division.
        half1 = image[:nrows // 2, :]  # upper half
        half2 = image[nrows // 2:, :]  # lower half
        return half1, half2
    half1 = image[:, :ncols // 2]  # left half
    half2 = image[:, ncols // 2:]  # right half
    return half1, half2
4a32b819754f060ee0281c3a47349dd2c6bd2dc3
48,490
import time def TestDelay(duration): """Sleep for a fixed amount of time. @type duration: float @param duration: the sleep duration, in seconds @rtype: (boolean, str) @return: False for negative value, and an accompanying error message; True otherwise (and msg is None) """ if duration < 0: return False, "Invalid sleep duration" time.sleep(duration) return True, None
68877dabba2141635bf0645f7a26cc30c4e98e7f
48,491
from typing import Awaitable
from typing import Tuple
from typing import Any
import asyncio


async def async_tuple(*coros: Awaitable) -> Tuple[Any, ...]:
    """Await several awaitables and get a tuple of their results back.

    Example:
        result_one, result_two = await async_tuple(gen_one(), gen_two())
    """
    return tuple(await asyncio.gather(*coros))
a5a79759eedec03b403aa5b2f529fcf5e7d6d889
48,494
def matix_contains(m, elem): """Look for value in matrix""" for row, i in enumerate(m): try: col = i.index(elem) except ValueError: continue return row, col return -1, -1
f6c23186901b9514da67b65059b657b7e94c4f49
48,497
def tidy_input_string(s): """Return string made lowercase and with all whitespace removed.""" s = ''.join(s.split()).lower() return s
ea0fd434da5c5829b8bc7d38306080371fd36ca1
48,504
def zero_corner(experiment_definition): """Take the parameters corresponding to the zero corner. All of the minimums and the first categories.""" return {p['name']: (p['bounds']['min'] if p['type'] in ['int', 'double'] else p['categorical_values'][0]['name']) for p in experiment_definition['parameters']}
715a2b5ff94f1ea9940535aff3ad33fcb99985e0
48,507
import typing import re def string_split_newlines(string: str) -> typing.List[str]: """Split a string into words while keeping newline characters. Args: string (str): The string to split. Returns: typing.List[str]: A list of words. """ return re.findall(r"\S+|\n", string)
26d1ba4bceef0f3af9c469cad30f86d17c97db19
48,509
def unit_vector(vector): """ Calculate a unit vector in the same direction as the input vector. Args: vector (list): The input vector. Returns: list: The unit vector. """ length = sum([v ** 2 for v in vector]) ** 0.5 unit_vector_ = [v / length for v in vector] return unit_vector_
83a1040cb8f9155ff0057749a5c3a9dd798e1f28
48,512
def host_match(c_host, bw_host): """ Check if a cookie `c_host` matches a bw-list `bw_host`. """ if c_host == bw_host: return True elif bw_host.startswith('.') and c_host.endswith(bw_host): return True return False
3a21fccbb7fd1e4faecbe4807ced0131322bfc07
48,513
def _show_capture_callback(x): """Validate the passed options for showing captured output.""" if x in [None, "None", "none"]: x = None elif x in ["no", "stdout", "stderr", "all"]: pass else: raise ValueError( "'show_capture' must be one of ['no', 'stdout', 'stderr', 'all']." ) return x
f97124fdffd96d8e9013b4a846c9a8b8ef52db88
48,514
import torch def squash(x, eps = 1e-5): """ Squashes each vector to ball of radius 1 - \eps :param x: (batch x dimension) :return: (batch x dimension) """ norm = torch.norm(x, p=2, dim=-1, keepdim=True) unit = x / norm scale = norm**2/(1 + norm**2) - eps x = scale * unit # norm_2 = torch.sum(x**2, dim=-1, keepdim=True) # unit = x / torch.sqrt(norm_2) # scale = norm_2 / (1.0 + norm_2) # scale \in [0, 1 - eps] # x = scale * unit - eps # DO NOT DO THIS. it will make magnitude of vector consisting of all negatives larger return x
5223c37461d45111ce3a0c0216239e1d6c0a8c96
48,524
def calc_total_probe_depth(capture_data): """Takes a capture dict and returns a tuple containing the percentage of nucleotide positions in the target space covered by 0, 1, 2, 3, 4, and 5+ probes.""" total = 0 total_0 = 0 total_1 = 0 total_2 = 0 total_3 = 0 total_4 = 0 total_5 = 0 for header,(seq, depth) in capture_data.items(): total += len(depth) total_0 += depth.count(0) total_1 += depth.count(1) total_2 += depth.count(2) total_3 += depth.count(3) total_4 += depth.count(4) total_5 += len([d for d in depth if d >= 5]) total_0 = round(total_0 * 100 / total, 2) total_1 = round(total_1 * 100 / total, 2) total_2 = round(total_2 * 100 / total, 2) total_3 = round(total_3 * 100 / total, 2) total_4 = round(total_4 * 100 / total, 2) total_5 = round(total_5 * 100 / total, 2) return (total_0, total_1, total_2, total_3, total_4, total_5)
57929b1f70a2875ac721553a8b2dc3c6489743cd
48,530
def mysql_app_client(mysql_app): """Client interacting with app returned by the `mysql_app` fixture.""" return mysql_app.test_client()
64079932230cce2a497f7ac8a2cb3a217d1cd0a4
48,535
def find_pure_symbol(symbols, unknown_clauses): """ Find a symbol and its value if it appears only as a positive literal (or only as a negative) in clauses. Arguments are expected to be in integer representation. >>> find_pure_symbol({1, 2, 3}, [{1, -2}, {-2, -3}, {3, 1}]) (1, True) """ all_symbols = set().union(*unknown_clauses) found_pos = all_symbols.intersection(symbols) found_neg = all_symbols.intersection([-s for s in symbols]) for p in found_pos: if -p not in found_neg: return p, True for p in found_neg: if -p not in found_pos: return -p, False return None, None
ffa20cee768e81cd3525483bc2490ada6482b550
48,539
def getSuperType(date): """Coarse grained distinction between n/a, single date events and timespans""" if 'beginPoint' in date or 'endPoint' in date or 'Q' in date: return 'timespan' if date == 'undefined' or date == 'n/a': return 'n/a' return "singleDay"
7dc96cbe17a4dda12a42235c205b22404c3dd893
48,540
def package_length(p): """ Get the length of a package in java notation :param p: the package as a string :return: the length of the package """ return len(p.split('.'))
9261ab452f7e558c3492d0ca3aed837af9f3989f
48,545
def findClosestRoom(room, roomList):
    """
    Finds the closest room to 'room' in the roomList.
    Distance is calculated by rectangle centers.
    """
    currentClosest = None
    closestDistance = None
    # Rectangle center, assuming rooms are (x, y, width, height) tuples.
    roomCenter = (room[0] + room[2] // 2, room[1] + room[3] // 2)
    for compareRoom in roomList:
        compareCenter = (compareRoom[0] + compareRoom[2] // 2,
                         compareRoom[1] + compareRoom[3] // 2)
        dist = ((compareCenter[0] - roomCenter[0]) ** 2 +
                (compareCenter[1] - roomCenter[1]) ** 2) ** 0.5
        if currentClosest is None or dist < closestDistance:
            currentClosest = compareRoom
            closestDistance = dist
    return currentClosest
65276893cb4955152cbfa5c1e5808371a8839bc0
48,546
def str_to_int(s): """Convert binary strings (starting with 0b), hex strings (starting with 0x), decimal strings to int""" if s.startswith("0x"): return int(s, 16) elif s.startswith("0b"): return int(s, 2) else: return int(s)
47ac2e7755d9a4d10b1e0712ad23aabb2c0489f5
48,547
from typing import List def convert_color_from_hex_to_rgb(value: str) -> List[int]: """ converts a hex encoded colors to rgb encoded colors :param value: hex encoded color :return: rgb value """ value = value.lstrip('#') lv = len(value) return list(tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3)))
3aa737b8f06e45bad93df466185c4a997e943a13
48,549
import itertools def list_of_combs(arr): """returns a list of all subsets of a list""" combs = [] for i in range(len(arr)): listing = [list(x) for x in itertools.combinations(arr, i+1)] combs.extend(listing) return combs
324de4a3f7bf11600d36cf02386862ad56de832b
48,550
def remove_irrelevant_terms(graph, irrelevant_terms): """ This will prune out irrelevant terms and return a copy of the graph without those nodes Args: graph: Networkx object irrelevant_terms: Iterable giving irrelevant terms. Usually ConstantsAndUtilities.Ignore.iterable Returns: Pruned graph """ graph.remove_nodes_from(irrelevant_terms) return graph
7b7a7e81dff1dc21d579bb2b8f06e98235bd75e6
48,552
import hashlib import json def make_hash(d: dict): """ Generate a hash for the input dictionary. From: https://stackoverflow.com/a/22003440 Parameters ---------- d: input dictionary Returns ------- hash (hex encoded) of the input dictionary. """ return hashlib.md5(json.dumps(d, sort_keys=True).encode("utf-8")).hexdigest()
a4611fdcb54105eeaad336d9bea514f5ab93d6bf
48,554
def _get_default_annual_spacing(nyears): """ Returns a default spacing between consecutive ticks for annual data. """ if nyears < 11: (min_spacing, maj_spacing) = (1, 1) elif nyears < 20: (min_spacing, maj_spacing) = (1, 2) elif nyears < 50: (min_spacing, maj_spacing) = (1, 5) elif nyears < 100: (min_spacing, maj_spacing) = (5, 10) elif nyears < 200: (min_spacing, maj_spacing) = (5, 25) elif nyears < 600: (min_spacing, maj_spacing) = (10, 50) else: factor = nyears // 1000 + 1 (min_spacing, maj_spacing) = (factor * 20, factor * 100) return (min_spacing, maj_spacing)
ddc98001e9324f2486f141b86f285ac48b726dd2
48,558
def get_request_data(request): """Return (data, mimetype, charset) triple for Flask request.""" mimetype = request.mimetype charset = request.mimetype_params.get('charset') data = request.get_data() return (data, mimetype, charset)
f34d3af564282767eb8397190a6c6daded7a6d1b
48,559
def remove_duplicate(duplicate): """ remove duplicates in list """ final_list = [] for num in duplicate: if num not in final_list: final_list.append(num) return final_list
99676a444a76a76bcd0adcd95bba881c0a46b6ad
48,565
def get_overlap(gt_box: list, pr_box: list) -> float: """Intersection score between GT and prediction boxes. Arguments: gt_box {list} -- [x, y, w, h] of ground-truth lesion pr_box {list} -- [x, y, w, h] of prediction bounding box Returns: intersection {float} """ gt_x, gt_y, gt_w, gt_h = gt_box pr_x, pr_y, pr_w, pr_h = pr_box xA = max(gt_x, pr_x) xB = min(gt_x + gt_w, pr_x + pr_w) yA = max(gt_y, pr_y) yB = min(gt_y + gt_h, pr_y + pr_h) return float(max((xB - xA), 0) * max((yB - yA), 0))
c123cba55d323e919b062953740be7f786a95a59
48,568
import click


def format_lxml_syntax_error(exc, context_lines=5, full_xml=False):
    """
    Format a :class:`lxml.etree.XMLSyntaxError`, showing the error's context lines.

    *exc* should have been augmented with :func:`add_lxml_syntax_error_context`
    first. *context_lines* is the number of lines to show around the error.
    If *full_xml* is true, show the entire XML.
    """
    lines = exc.full_xml.splitlines()
    err_line = exc.lineno - 1
    err_offset = exc.offset - 1
    if full_xml:
        start_line = 0
        end_line = None
    else:
        # Clamp to 0 so an error near the top of the file does not produce a
        # negative slice start (which would select lines from the end).
        start_line = max(0, err_line - context_lines)
        end_line = err_line + context_lines
    before_context = lines[start_line:err_line]
    error_line = lines[err_line]
    after_context = lines[err_line + 1:end_line]
    lines = [
        'XML syntax error in %s:' % click.style(exc.context, bold=True),
        '',
        click.style(exc.message, bold=True),
        '',
    ]
    # Get the error context lines
    xml_lines = []
    xml_lines += before_context
    xml_lines.append(
        click.style(error_line[:err_offset], fg='red') +
        click.style(error_line[err_offset], fg='red', bold=True) +
        click.style(error_line[err_offset + 1:], fg='red')
    )
    xml_lines_error_index = len(xml_lines)
    xml_lines += after_context
    # Add line numbers gutter
    gutter_width = len('%s' % (len(xml_lines) + start_line + 1))
    gutter_fmt = '%%%si' % gutter_width
    margin_width = 2
    xml_lines = [
        click.style(gutter_fmt % (i + start_line + 1), fg='black', bold=True) +
        ' ' * margin_width + l
        for i, l in enumerate(xml_lines)
    ]
    # Add error marker
    xml_lines.insert(xml_lines_error_index,
                     ' ' * (err_offset + margin_width + gutter_width) + '^')
    lines += xml_lines
    return '\n'.join(lines)
096d926cd8fe7d0a8047e31b9e8ce0c944498a19
48,571
import torch def events_to_voxel_grid_mod(events_torch, num_bins, width, height): """ A slightly modified version of thirdparty.e2vid.utils.inference_utils, where the input is already been placed on a torch device Code from: https://github.com/uzh-rpg/rpg_e2vid """ device = events_torch.device assert (events_torch.shape[1] == 4) assert (num_bins > 0) assert (width > 0) assert (height > 0) with torch.no_grad(): voxel_grid = torch.zeros(num_bins, height, width, dtype=torch.float32, device=device).flatten() # Normalize the event timestamps so that they lie # between 0 and num_bins last_stamp = events_torch[-1, 0] first_stamp = events_torch[0, 0] deltaT = last_stamp - first_stamp if deltaT == 0: deltaT = 1.0 events_torch[:, 0] = (num_bins - 1) * \ (events_torch[:, 0] - first_stamp) / deltaT ts = events_torch[:, 0] xs = events_torch[:, 1].long() ys = events_torch[:, 2].long() pols = events_torch[:, 3].float() pols[pols == 0] = -1 # polarity should be +1 / -1 tis = torch.floor(ts) tis_long = tis.long() dts = ts - tis vals_left = pols * (1.0 - dts.float()) vals_right = pols * dts.float() valid_indices = tis < num_bins valid_indices &= tis >= 0 voxel_grid.index_add_(dim=0, index=xs[valid_indices] + ys[valid_indices] * width + tis_long[valid_indices] * width * height, source=vals_left[valid_indices]) valid_indices = (tis + 1) < num_bins valid_indices &= tis >= 0 voxel_grid.index_add_(dim=0, index=xs[valid_indices] + ys[valid_indices] * width + (tis_long[valid_indices] + 1) * width * height, source=vals_right[valid_indices]) voxel_grid = voxel_grid.view(num_bins, height, width) return voxel_grid
3f720f57cfd82256be47eff59887c696ed354f06
48,572
def normalize_sec_advisory(string): """ Modifies input string to a form which will be further processed. :param string: input string :return: modified string """ normalized_string = string.lstrip(':').strip() return normalized_string
ca652dacac42ce8047b6299c393afce04662c973
48,576
def decToDegMinSec(dd: float) -> tuple: """ Converts decimal degrees to deg/min/sec. Parameters: dd (float): Decimal Degrees Returns: tuple: (degrees,minutes,seconds) of integers """ isPositive = dd >= 0 dd = abs(dd) minutes,seconds = divmod(dd*3600, 60) degrees,minutes = divmod(minutes, 60) degrees = degrees if isPositive else -degrees return (round(degrees),round(minutes),round(seconds))
c0c46ab9be29812084a4a88efde08a9e5702757c
48,581
from collections import Counter


def _find_duplicates(barcodes):
    """Report any barcode observed more than a single time

    Parameters
    ----------
    barcodes : iterable of str
        The barcodes to check for duplicates in

    Returns
    -------
    set
        Any barcode observed more than a single time
    dict
        Any error information or None
    """
    error = None
    counts = Counter(barcodes)
    dups = {barcode for barcode, count in counts.items() if count > 1}

    if len(dups) > 0:
        error = {
            "barcode": list(dups),
            "error": "Duplicated barcodes in input"
        }

    return dups, error
b90b7171357ec200df0467f9998509c0291fd58c
48,582
def solr_field(name=None, type='string', multiValued=False, stored=True,
               docValues=False):
    """solr_field: convert python dict structure to Solr field structure"""
    if not name:
        raise TypeError('solr_field() missing 1 required positional '
                        'argument: "name"')
    lookup_bool = {True: 'true', False: 'false'}
    return {'name': name,
            'type': type,
            'multiValued': lookup_bool[multiValued],
            'stored': lookup_bool[stored],
            'docValues': lookup_bool[docValues]}
e6ce4366c54929caa2bbd35380974e1e85602764
48,585
import random def random_alter_crc(test_case=None): """Choose a random bool for alter_crc.""" if test_case: if test_case.get("crc"): return True else: return False else: return random.random() < 0.1
943e6d734a1347b64496e2a863370ff378b87a39
48,586
def section(title): """center in a string 60 wide padded with =""" return "\n{:=^60}\n.".format(title)
f8904ee5429d5ffe0e9373f61f0db98a261359c4
48,590
import ntpath def path_leaf(path): """Returns the leaf of a given path. Args: path (string): path that is going to be processed. Returns: string: path leaf. """ head, tail = ntpath.split(path) return tail or ntpath.basename(head)
4fa581178cf7018148bb9b466766b5b80732a997
48,591
import pytz


def datetime_from_utc_to_local(utc_datetime):
    """Convert the given UTC date and time to local time

    :param utc_datetime: naive datetime expressed in UTC
    :return: local datetime
    """
    return pytz.utc.localize(utc_datetime, is_dst=None).astimezone()
8b1fea8fe23ae140f8f4275aa768483b50603e93
48,593
def _label_from_key(k: str) -> str: """ Helper function to create the x-tick labels from the keys in the result dict """ if k == 'bayrn': return 'BayRn' elif k == 'epopt': return 'EPOpt' else: return k.upper()
ac1dca696c3d5728f150a6a4b1fd9e352b182f00
48,594
def bytes_to_hex(list_of_bytes): """ >>> bytes_to_hex([1]) '01' >>> bytes_to_hex([100,120,250]) '6478fa' """ if any(i < 0 or i > 255 for i in list_of_bytes): raise ValueError("Value outside range 0 to 255") return "".join("{:02x}".format(i) for i in list_of_bytes)
df3319e7d4a5e68d7b90e755307c4bc8282cae50
48,595
def SGSeries(v): """ SGxxx series selector """ return "SG" in v["platform"]
f85401703769b83e333588a27377f880c24d4037
48,596
def color_rgb_to_int(red: int, green: int, blue: int) -> int: """Return a RGB color as an integer.""" return red * 256 * 256 + green * 256 + blue
94c4bcd5e81b9f7dbd2571984b5d128804843bab
48,598
def extract_unique_entities_and_relations(triples): """ Identifies unique entities and relation types in collection of triples. Args: triples: List of string triples. Returns: unique_entities: List of strings unique_relations: List of strings """ s_entities = set([triple[0] for triple in triples]) o_entities = set([triple[2] for triple in triples]) r_types = set([triple[1] for triple in triples]) unique_relations = sorted(list(r_types)) unique_entities = sorted(list(s_entities | o_entities)) # union of sets return unique_entities, unique_relations
92e7d95991d28d8692fc3a0ab89a3fe487887e41
48,605
def tz_to_str(tz_seconds: int) -> str:
    """convert timezone offset in seconds to string in form ±HHMM (as offset from GMT)"""
    sign = "+" if tz_seconds >= 0 else "-"
    tz_seconds = abs(tz_seconds)
    # get minutes first, discarding any leftover seconds
    mm, _ = divmod(tz_seconds, 60)
    # Get hours
    hh, mm = divmod(mm, 60)
    return f"{sign}{hh:02}{mm:02}"
a929a46b957063f2f16a66d304a708838a45ba49
48,608
def get_haplotype(read): """Return the haplotype to which the read is assigned Or 'un' for reads that are unphased""" return str(read.get_tag('HP')) if read.has_tag('HP') else 'un'
ff6f45456640e8b7084558a9f38851b664bc729c
48,609
def createContainer(values):
    """
    Create container for symbolic values, equivalent of Pascal record or C struct.

    :param dict values: values to be added as container properties
    :return: class instance with given properties
    :rtype: Container

    >>> a = createContainer({"floatNumber": 3, "stringValue": "Test"})
    >>> print(a.floatNumber, a.stringValue)
    3 Test
    """
    class Container:
        pass
    result = Container()
    for key, value in values.items():
        setattr(result, key, value)
    return result
d0a271d7df6ef5829bb90ab660ff6319ac111285
48,611
def meters_formatter(f): """ Returns a float with 4 decimal digits and the unit 'm' as suffix. """ if f is None: return None return '%0.4f m' % f
ae80f27b16fba79b0c02fb25aa61e4324c4ece04
48,612
def uniquify(iterable): """Uniquify the elements of an iterable.""" elements = {element: None for element in iterable} return list(elements.keys())
3932291387a273d12ce5092d0d9544fcf6386af5
48,613
def flatten_lists(listoflists): """ Flatten a python list of list :param listoflists: (list(list)) :return: (list) """ return [el for list_ in listoflists for el in list_]
3bce27cb3775352f646a7450dcd325067c399afe
48,622
def convert_array_list_to_x_y_lists(array_list):
    """
    Returns two lists (first with x values, second with y values) of the arrays in array_list.

    Parameters
    ----------
    array_list : list (of arrays)
        List with numpy.arrays (with x, y coordinates)

    Returns
    -------
    list_x : list (of floats)
        List with x coordinates
    list_y : list (of floats)
        List with y coordinates
    """
    list_x = []
    list_y = []

    for array in array_list:
        list_x.append(array[0])
        list_y.append(array[1])

    return list_x, list_y
9bb796566f273eac1ac83bf7eec1e13c39df7b36
48,641
def _max(iterable): """ Max is zero, even if iterable is empty >>> _max([]) 0 >>> _max([5]) 5 >>> _max([1, 2]) 2 """ try: return max(iterable) except ValueError: return 0
8d77b443eaa9ee77a3fceecef986c265222af29d
48,645
def _generate_weight(label, nl_symbol, n_fold): """ Generate weights based on one multi-label label. label (str): multi-label or multi-class label nl_symbol (str): the symbol representing no label n_fold (float): unlabeled has n_fold weight value comparing to labeled =========================================================================== return (list): weights """ weights = [1] * len(label) for i, lbl in enumerate(label): if lbl == nl_symbol: weights[i] = n_fold return weights
b31c7d8848b46b9e6aebdb60ea5ea461388287dc
48,646
import time import statistics def timereps(reps, func): """Helper function to call a function multiple times for benchmarking.""" times = [] for i in range(0, reps): start = time.time() func() times.append(time.time() - start) return statistics.mean(times)
3af30c3743b0f6af02a2b3441a7a651484c7305c
48,651
def reduce_terms(df_frequency, max_df=1.0, min_df=1, max_terms=None, keep_less_freq=False): """Remove unimportant terms from term-by-document matrix. Parameters ---------- df_frequency : pd.DataFrame max_df : float , between [0, 1] Terms that appear in more % of documents will be ignored min_df : int Terms that appear in <= number of documents will be ignored max_terms : int , None If not None or 0, only top `max_terms` terms will be returned. keep_less_freq : bool Decides wherever to keep most frequent or least frequent words when `max_terms` < len. """ df = df_frequency.copy() corpus_size = df.shape[1] df['doc_apperance'] = df.fillna(0).astype(bool).sum(axis=1) df['doc_frequency'] = df['doc_apperance'] / corpus_size df = df[df.doc_frequency <= max_df] df = df[df.doc_apperance > min_df] if max_terms is not None and max_terms != 0 and max_terms < df.shape[0]: df = df.sort_values('doc_frequency', ascending=keep_less_freq) df = df.head(max_terms) return df.drop('doc_apperance', axis=1)
d245ba4d768797a2ad5c0c3483b6467221610a22
48,653
import glob def get_files(path_name, file_ext='json'): """ get the list of files in the path name :param path_name: <str> file path name to search. :param file_ext: <str> file extension to save. :return: <list> array of files found. """ return glob.glob(path_name + '/*{}'.format(file_ext))
51400d0e2dbce21370e731c09638ac27e914d258
48,654
def I(pcset): """Returns inversion of pcset.""" return [(12-x)%12 for x in pcset]
11bb86a57616400ef033507460c8bd871f048458
48,657
def get_suits(cards):
    """
    Returns a list of strings containing the suit of each card in cards.
    ex. get_suits(['2S','3C','5C','4D','6D']) returns ['S','C','C','D','D']
    """
    return [card[-1] for card in cards]
dbbcfb89f134b3114ec2ea4afcdb86b6bae17399
48,666
def file2text(filepath, verbose=True):
    """Read all lines of a file into a string.

    Note that we destroy all the new line characters and all the whitespace
    characters on both ends of each line. Note that this is very radical for
    source code of programming languages or similar.

    Parameters
    ----------
    filepath : pathlib.Path
        Path to the file

    verbose : bool
        If True, we print the name of the file.

    Returns
    -------
    text : str
        All the text found in the input file.
    """
    with filepath.open("r") as f:
        texts = [line.strip() for line in f.readlines()]

    texts = [x for x in texts if x and not x.isspace()]
    if verbose:
        print(filepath.name)

    return " ".join(texts)
c002a94238e064e6993b7a8fa2942eafb615a24f
48,668
def post_list_mentions(db, usernick, limit=50):
    """Return a list of posts that mention usernick, ordered by date

    db is a database connection (as returned by COMP249Db())
    return at most limit posts (default 50)

    Returns a list of tuples (id, timestamp, usernick, avatar, content)
    """
    mentions = []
    cursor = db.cursor()
    sql = """select id, timestamp, usernick, avatar, content
             from posts, users
             where posts.usernick = users.nick
             order by timestamp desc"""
    cursor.execute(sql)
    for row in cursor.fetchall():
        if usernick in row[4]:
            mentions.append(row)
        # Honour the limit promised in the docstring.
        if len(mentions) >= limit:
            break
    return mentions
1b50f9199b946ff9013d86ff504aa42f01680e7b
48,669
def get_random_urls(prefix='http://www.example-shop.com/product/', size=1000,
                    start_index=None):
    """
    Create random url endpoints.

    Args:
        prefix (str): common url prefix for every endpoint.
        size (int): number of urls to be created.
        start_index (int): optional argument for starting number.
    """
    if not start_index:
        start_index = 1
    end_index = start_index + size
    # range() excludes the end point, so this yields exactly `size` urls.
    urls = [prefix + str(url_suffix)
            for url_suffix in range(start_index, end_index)]
    return urls
e302b4f1003391e1eb6e4e400b9d49e0710782fd
48,670
def exists(*, uri: str) -> bool:
    """Check for the existence of a local/remote file.

    Usage::

        fs.exists("file:///...")
        fs.exists("s3://...")

    Args:
        uri: URI to the file to check.

    Returns:
        bool: True if the file exists, else False.
    """
    try:
        with open(uri, "rb"):
            return True
    except IOError:
        return False
b037fa08fdc559fffbed30f54bc61979fd6d2442
48,671
import random def random_seq(length: int, charset: str = "ATCG") -> str: """Get a random sequence. :param length: Length of sequence. :param charset: Char population. :return: Random sequence """ return "".join(random.choices(charset, k=length))
036742e9777d15a813b99b61e54dfb03f8a7ab80
48,674
def bed_lines(request): """Return a iterable with bed lines""" lines = [ "#Chromosome/scaffold name\tStart (bp)\tGene End (bp)\tHGNC symbol\tGene ID", "13\t23708313\t23708703\tHMGA1P6\tENSG00000233440\n", "13\t23726725\t23726825\tRNY3P4\tENSG00000207157\n", "13\t23743974\t23744736\tLINC00362\tENSG00000229483\n" ] return lines
5703b724ee2de490e256f0ae6eeadb0b4bb30ead
48,680
from typing import Any from typing import Literal def _return_true(*_: Any) -> Literal[True]: """Always return true, discarding any arguments""" return True
a72e74ecc942a36d472bae7e957a7a6fb0879403
48,682
def strike_water_temp(init_grain_temp: float, strike_water_vol: float, grain_wt: float, target_mash_temp, equip_temp_loss: float = 1) -> float: """Computes strike water temperature based primarily on initial mash temp. Calculation here http://howtobrew.com/book/section-3/ the-methods-of-mashing/calculations-for-boiling-water-additions Args: init_grain_temp: Initial grain temp in F strike_water_vol: Strike water volume in gal grain_wt: Grain weight in lbs target_mash_temp: Target mash temp in F equip_temp_loss: Equipment temperature loss in F (default is 1) Returns: Strike water temperature in F i.e. temp to heat water before adding grain """ # multiply vol by 4 to convert to quarts per pound r = strike_water_vol * 4.0/grain_wt t2 = target_mash_temp + equip_temp_loss return (0.2/r) * (t2 - init_grain_temp) + t2
224dc1608735a6e1431d531371afdafeb2245ae3
48,684
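Illustrative arithmetic for the strike_water_temp snippet above, with hypothetical values: 10 lb of grain at 70 °F, 5 gal of strike water (r = 5 * 4.0 / 10 = 2 qt/lb), a 152 °F mash target and the default 1 °F equipment loss give t2 = 153 and a strike temperature of (0.2 / 2) * (153 - 70) + 153 = 161.3 °F.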
from typing import Iterable


def check_for_func(sequence: Iterable) -> bool:
    """Used to determine if a list or tuple of columns passed into a sql function
    has parentheses '()', which indicate a function that needs to be parsed out

    Args:
        sequence (Iterable): list/tuple of column names

    Returns:
        bool: True if function found
    """
    # make all elements strings
    it = map(str, sequence)
    combined = "".join(it)
    return "(" in combined
8832c70f98a36eb761fdec5b47385575b92c6ca8
48,686
def _ambisonics_channel_count_from_order(order, three_dim=True): """ Helper function that computes the number of channels for a given ambisonics order. """ return (order + 1)**2 if three_dim else (2 * order + 1)
c4393b29ca4adef07fefb706b73ea0b33d1f2049
48,690
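Illustrative check of the _ambisonics_channel_count_from_order snippet above: a third-order signal needs (3 + 1)**2 = 16 channels in the 3D case and 2 * 3 + 1 = 7 channels in the 2D (horizontal-only) case, matching the standard ambisonics channel counts.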
def grouped(iterable, n): """ s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ... \n Turns a list into a list of tuples to then be iterated over\n source: "https://stackoverflow.com/questions/5389507/iterating-over-every-two-elements-in-a-list" :param iterable: The iterable object to be paired up :param n: how many objects per grouping :return: an iterable of grouped members Examples:\n l = [1,2,3,4,5,6]\n for i,k in grouped(l,2): print str(i), '+', str(k), '=', str(i+k)\n Output:\n 1+2=3\n 3+4=7\n 5+6=11\n """ return zip(*[iter(iterable)] * n)
fdf06efa3d65440894afe819b06c972fb7d9ab3c
48,693
import requests def request_waterinfo_timeseries(station_id, period="P3D"): """Request time series data from VMM Waterinfo Parameters ---------- station_id : string ts_id as defined by Waterinfo/Kisters/dbase period : string define the timeframe to request data from. e.g. 1 day: P1D or 10 days: P10D, default to 3 days period Returns ------- dict with the data and metadata fields from the request """ returnfields = ["Timestamp", "Value", "Quality Code"] r = requests.get("http://download.waterinfo.be/tsmdownload/KiWIS/KiWIS", params={"datasource": 0, "type": "queryServices", "service": "kisters", "request": "getTimeseriesValues", "ts_id": station_id, "format": "json", "period": period, "metadata": "true", "returnfields": ','.join(returnfields)}) return r.json()[0]
be1822e85adafc45f55194e315ce2db51db7d664
48,694
import uuid def _parse_legacy_uuid(doc): """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) return uuid.UUID(doc["$uuid"])
6a238a637f98a5f0a7c79374f09bb72fc7b17ebe
48,696
def _get_datasets(dataref):
    """
    Generate a list containing the names of available sample datasets.

    Parameters
    ----------
    dataref: dict
        Mapping of sample dataset names to dataset references.

    Returns
    -------
    list
        Names of available sample datasets.
    """
    return list(dataref.keys())
9363983f6137f700b065881f6edd809dbeb657e2
48,699
def add_relative_src_offset(metadata): """Get the source offset relative to the lens center Parameters ---------- metadata : pd.DataFrame the metadata generated by Baobab Returns ------- pd.DataFrame metadata augmented with relative source offset columns added """ metadata['src_light_pos_offset_x'] = metadata['src_light_center_x'] - metadata['lens_mass_center_x'] metadata['src_light_pos_offset_y'] = metadata['src_light_center_y'] - metadata['lens_mass_center_y'] return metadata
0f312cd36ab291f070667190947d2b3d24c93998
48,700
def sponsor_image_url(sponsor, name): """Returns the corresponding url from the sponsors images""" if sponsor.files.filter(name=name).exists(): # We avoid worrying about multiple matches by always # returning the first one. return sponsor.files.filter(name=name).first().item.url return ''
9646a456ec6137a1616fcdec2f98fdd7827b8303
48,702
import uuid def random_name(module_name): """Generate node name.""" return f"{module_name}_{str(uuid.uuid4()).split('-')[0]}"
07b9b91a016249b7ab83b87f12cf43c44298712e
48,703
def heat_exchanger_utilities_from_units(units): """Return a list of heat utilities from all heat exchangers, including the condensers and boilers of distillation columns and flash vessel heat exchangers.""" heat_utilities = sum([i.heat_utilities for i in units], ()) return [i for i in heat_utilities if i.heat_exchanger]
4292c455b97625cf16e0efe42c860f918607b34b
48,704
def indicator(condition): """ :param condition: A boolean statement on which to evaluate inputs. :type condition: bool :return: 1 if the condition holds, 0 otherwise. :rtype: float Implementation of the indicator function 1_{condition}(x) """ return float(condition)
8e20424efa5c48087bf3ffa958bf1fc27d179cb9
48,708
def general_checks(all_methods_df): """ Checks for the Pandas.DataFrame Parameters ---------- all_methods_df : Pandas.DataFrame Returns ------- tuple - counts: Pandas.DataFrame containing the amount of entries for each column and method - nans: Pandas.DataFrame containing the amount of NaNs for each column and method - times: Pandas.Series containing the time required for each method to perform the alignments of the sample set """ counts = all_methods_df.groupby("method").count() nans = all_methods_df[all_methods_df["SI"].isnull()].groupby("method").count() times = round(all_methods_df.groupby("method")["time"].sum() / 60, 2) return counts, nans, times
140017f4c7df941b1f3e153f5b6cb1c1b4600e1b
48,710
def find_child(parent, child_tag, id=None): """ Find an element with *child_tag* in *parent* and return ``(child, index)`` or ``(None, None)`` if not found. If *id* is provided, it will be searched for, otherwise the first child will be returned. """ for i, child in enumerate(list(parent)): if child.tag == child_tag: if id is None: return (child, i) child_id = child.find(f'{child_tag}ID').text if child_id == id: return (child, i) if child_id.split(',')[-1] == id.split(',')[-1]: return (child, i) return (None, None)
ebc9b9bb6ba7ed78efc7a094478b22de1769ccb9
48,712
def PCopyTables(inUV, outUV, exclude, include, err):
    """ Copy Tables from one UV data set to another

    inUV    = Python UV object
    outUV   = Output Python UV object, must be defined
    exclude = list of table types to exclude (list of strings)
              has priority
    include = list of table types to include (list of strings)
    err     = Python Obit Error/message stack
    """
    ################################################################
    if ('myClass' in inUV.__dict__) and (inUV.myClass == 'AIPSUVData'):
        raise TypeError("Function unavailable for " + inUV.myClass)
    return inUV.CopyTables(outUV, exclude, include, err)
    # end PCopyTables
04112aea5039b84a6baec3c1948af617376d4d06
48,716
def traverse_list(n): """ Traverse the whole list. Return the last node and the number of steps to the last node. """ steps = 0 while n.next is not None: n = n.next steps += 1 return n, steps
cd7a3a17f99e9acc54c41b105c4c3befda377f4d
48,718
import torch def omega_norm(Z, Omega): """ Calculates norm(Omega * Z) :param Z: delta perturbation :param Omega: transformation matrix :return: l2 norm of delta multiplied by Omega """ return torch.matmul(Omega, Z.view(Z.shape[0], -1).T).T.norm(dim=1)
fb413fe446ecd70122445087fe945b8685387934
48,728
def replace_nones_in_dict(target, replace_value): """Recursively replaces Nones in a dictionary with the given value.""" for k in target: if target[k] is None: target[k] = replace_value elif type(target[k]) is list: result = [] for e in target[k]: if type(e) is dict: result.append(replace_nones_in_dict(e, replace_value)) else: if e is None: result.append(replace_value) else: result.append(e) target[k] = result elif type(target[k]) is dict: replace_nones_in_dict(target[k], replace_value) return target
4a6dda79e60cbb5760ff524b5be4399232d8fe4f
48,729
import copy def create_unmodified_census(census): """ This creates a cell census, ignoring any modifications. The output is suitable for use as input to `simulate_water_quality`. """ unmod = copy.deepcopy(census) unmod.pop('modifications', None) return unmod
bec0c27ed1993cb70bc6b88027c54349d7cae6a0
48,730
def pairwise_difference(array): """ Take difference between every element pair. """ return array - array[:, None]
2435a1f2b5203f4908881d3f0f2f44b179b5c19e
48,731
def percentage(x, pos): """ Adds percentage sign to plot ticks. """ return '%.0f%%' % x
66a0c001bc289ffd2b41feada434b635fabc7391
48,733