Dataset columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def read_blacklisted_ranges(fn, num_alignments):
    """Read list of blacklisted ranges.

    There must be 3 columns in the file:
        1 - an identifier for an alignment that the guide should be covering
            (0-based, with maximum value < num_alignments)
        2 - the start position (inclusive) of a range in the corresponding alignment
        3 - the end position (exclusive) of a range in the corresponding alignment

    Args:
        fn: path to file, with the format given above
        num_alignments: the number of alignments that the ranges might
            correspond to

    Returns:
        list x of length num_alignments such that x[i] corresponds to the
        i'th alignment, as given in column 1. x[i] is a set of tuples
        (start, end) corresponding to the values in columns 2 and 3
    """
    blacklisted_ranges = [set() for _ in range(num_alignments)]
    with open(fn) as f:
        for line in f:
            ls = line.rstrip().split('\t')
            aln_id = int(ls[0])
            start = int(ls[1])
            end = int(ls[2])

            # Check aln_id
            if aln_id < 0 or aln_id > num_alignments - 1:
                raise Exception(("Alignment id %d in column 1 of blacklisted "
                                 "ranges file is invalid; must be in [0, %d]") %
                                (aln_id, num_alignments - 1))

            # Check that end > start
            if start < 0 or end <= start:
                raise Exception(("Blacklisted range [%d, %d) is invalid; "
                                 "values must be >= 0 and end > start") % (start, end))

            blacklisted_ranges[aln_id].add((start, end))
    return blacklisted_ranges
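A minimal usage sketch for the reader above (file contents and values invented for illustration):

# Hypothetical check: write a two-line blacklist file and read it back.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as tmp:
    tmp.write("0\t10\t20\n1\t5\t8\n")
    path = tmp.name

ranges = read_blacklisted_ranges(path, num_alignments=2)
assert ranges[0] == {(10, 20)}
assert ranges[1] == {(5, 8)}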
2c67a0e60c225d1664313608529e5ff56444a87d
72,332
import csv


def parse_csv_data(csv_filename) -> list:
    """extracts CSV data and formats it as a list.

    :param csv_filename: the name of the CSV file you wish to parse.
    :return: the lines of the CSV as a list of lists.
    """
    file_lines = []  # initialises list to append to
    with open(csv_filename) as file:
        # the .reader method helps organise the file into a 2D array
        csv_reader = csv.reader(file, delimiter=",")
        for line in csv_reader:  # each line is read and appended to the list
            file_lines.append(line)
    return file_lines
ce7c72c0cdbb18a379541e0b2d7e0fb3cc209e55
72,335
def checkOut(out, filename):
    """Check for errors in uhbdaa and uhbdpr.out."""

    lines = out.split("\n")

    # Check for fatal errors: columns 1-5 of each line hold the error tag
    tags = [l[1:6] for l in lines]
    try:
        err_index = tags.index("FATAL")
        err = ["%s\n" % (80 * "-")]
        err.append("Fatal Error in %s:\n" % filename)
        err.extend(["%s\n" % (80 * "-")])
        err.append("tail -5 %s\n\n" % filename)
        err.extend(["%s\n" % l for l in lines[-5:]])
        return 1, "".join(err)
    except ValueError:
        return 0, ""
f2b849d1dd72832fc6e807e85a684ca267061d94
72,337
def get_formatted_month_year(month, year):
    """
    Returns month/year formatted like MM/YYYY. e.g.: 09/2015

    :param year: Year
    :param month: Month
    :return: Formatted month/year (e.g. 09/2015), ``str``
    """
    # the '=02' format spec zero-pads the month to two digits
    return "{0:=02}/{1}".format(month, year)
b2c484044d63661b68a1a3619e73c1f303575ba3
72,338
def _getCcFlags(build):
    """A wrapper for BuildType.CcFlags that copes with ValueError being raised.

    This happens with the GccOptNative build type, since the compile flags
    are machine-specific.
    """
    try:
        return build.CcFlags()
    except ValueError:
        return "[machine specific optimisations are applied]"
38191b8be52b77a22b274d0fb8f89bc744eb236e
72,339
def format_index(index):
    """Convert Index to fixture format."""
    return {
        "fields": {
            "name": index.name,
            "table_id": index.table_id,
            "workspace_id": str(index.workspace_id),
            "created_at": str(index.created_at),
            "updated_at": str(index.updated_at),
            "object_id": index.object_id,
            "is_unique": index.is_unique,
            "is_primary": index.is_primary,
            "sql": index.sql,
        },
        "model": "definitions.index",
        "pk": index.pk,
    }
94d1aeb1a48af39039314a6cd3623c580cb01532
72,340
def writeADESHeader(
        observatory_code,
        submitter,
        telescope_design,
        telescope_aperture,
        telescope_detector,
        observers,
        measurers,
        observatory_name=None,
        submitter_institution=None,
        telescope_name=None,
        telescope_fratio=None,
        comment=None):
    """
    Write the ADES PSV headers.

    Parameters
    ----------
    observatory_code : str
        MPC-assigned observatory code
    submitter : str
        Submitter's name.
    telescope_design : str
        Telescope's design, eg. Reflector.
    telescope_aperture : str
        Telescope's primary aperture in meters.
    telescope_detector : str
        Telescope's detector, eg. CCD.
    observers : list of str
        First initial and last name (J. Smith) of each of the observers.
    measurers : list of str
        First initial and last name (J. Smith) of each of the measurers.
    observatory_name : str, optional
        Observatory's name.
    submitter_institution : str, optional
        Name of submitter's institution.
    telescope_name : str, optional
        Telescope's name.
    telescope_fratio : str, optional
        Telescope's focal ratio.
    comment : str
        Additional comment to add to the ADES header.

    Returns
    -------
    list : str
        A list of each line in the ADES header.
    """
    # Start header with version number
    header = [
        "# version=2017",
    ]

    # Add observatory [required]
    header += ["# observatory"]
    header += [f"! mpcCode {observatory_code}"]
    if observatory_name is not None:
        header += [f"! name {observatory_name}"]

    # Add submitter [required]
    header += ["# submitter"]
    header += [f"! name {submitter}"]
    if submitter_institution is not None:
        header += ["! institution {}".format(submitter_institution)]

    # Add telescope details [required]
    header += ["# telescope"]
    if telescope_name is not None:
        header += [f"! name {telescope_name}"]
    header += [f"! design {telescope_design}"]
    header += [f"! aperture {telescope_aperture}"]
    header += [f"! detector {telescope_detector}"]
    if telescope_fratio is not None:
        header += [f"! fRatio {telescope_fratio}"]

    # Add observer details
    header += ["# observers"]
    if type(observers) is not list:
        err = "observers should be a list of strings."
        raise ValueError(err)
    for name in observers:
        header += [f"! name {name}"]

    # Add measurer details
    header += ["# measurers"]
    if type(measurers) is not list:
        err = "measurers should be a list of strings."
        raise ValueError(err)
    for name in measurers:
        header += [f"! name {name}"]

    # Add comment
    if comment is not None:
        header += ["# comment"]
        header += ["! line {}".format(comment)]

    header = [i + "\n" for i in header]
    return header
88c2685aa0dbf7de0fbf6af7f354092ea71b37dc
72,343
import csv


def _parse_wmic_csv_output(text):
    """
    Parse the output of Windows "wmic logicaldisk list full /format:csv" command.
    """
    # parse out the comma-separated values of each non-empty row
    rows = [row for row in csv.reader(text.split("\n")) if row]
    # use the first row as the header row
    header = rows.pop(0)
    # turn each row into a dict, mapping the header text of each column
    # to the row's value for that column
    return [dict(zip(header, row)) for row in rows]
458cd60a21305ff8144440839d395cedd62d6f36
72,345
def is_iterable(f):
    """Returns True if an object can be iterated over by using iter(obj)."""
    try:
        iter(f)
        return True
    except TypeError:
        return False
d5d2eb93a82a9d42fdbdf3a5bac17646dd1339e5
72,348
def _check_conversion(key, valid_dict):
    """Check for existence of key in dict, return value or raise error"""
    if key not in valid_dict and key not in valid_dict.values():
        # Only show users the nice string values
        keys = [v for v in valid_dict.keys() if isinstance(v, str)]
        raise ValueError('value must be one of %s, not %s' % (keys, key))
    return valid_dict[key] if key in valid_dict else key
9f9054265b53efc9ee5d6aaf7aeb56e476d18dd8
72,351
def comment_text(results):
    """ get comment text """
    text = results['text']
    return text
66663f27ef4a6992502791aa8cf6d34381380ab1
72,352
def token(args, premise):
    """Return token."""
    # Only the first argument is used; `premise` is unused here.
    return args[0]
20d8efe0e2107fed4826bbaec61079bc7fa917af
72,355
import math


def fix_float_single_double_conversion(value: float) -> float:
    """Fix precision for single-precision floats and return what was probably
    meant as a float.

    In ESPHome we work with single-precision floats internally for performance.
    But python uses double-precision floats, and when protobuf reads the message
    it's auto-converted to a double (which is possible losslessly).

    Unfortunately the float representation of 0.1 converted to a double
    is not the double representation of 0.1, but 0.10000000149011612.

    This method tries to round to the closest decimal value that a float
    of this magnitude can accurately represent.
    """
    if value == 0 or not math.isfinite(value):
        return value
    abs_val = abs(value)
    # assume ~7 decimals of precision for floats to be safe
    l10 = math.ceil(math.log10(abs_val))
    prec = 7 - l10
    return round(value, prec)
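To see the single-to-double artifact this corrects, one can widen a float32 0.1 to a Python double (numpy is assumed here purely as a convenient single-precision source):

# Assumes numpy is available; any single-precision source shows the effect.
import numpy as np

widened = float(np.float32(0.1))
print(widened)                                      # 0.10000000149011612
print(fix_float_single_double_conversion(widened))  # 0.1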
0964a25a0a8205d5b448388779351e90e3967505
72,360
def get_fi_repeated_prime(p, k=1):
    """
    Return Euler totient for prime power p^k

    :param p: prime number
    :param k: power
    :return: fi(p^k)
    """
    return pow(p, k - 1) * (p - 1)
142c6adf1ef0fbab3d74b3138cd63b19bf60ff62
72,361
def greatest_common_divisor(value1: int, value2: int):
    """Compute the greatest common divisor of two values."""
    value1 = abs(value1)
    value2 = abs(value2)
    if value1 < value2:
        value1, value2 = value2, value1
    remainder = value1 % value2
    if remainder == 0:
        return value2
    return greatest_common_divisor(value2, remainder)
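A few quick checks of the recursive GCD (values chosen arbitrarily; note that a zero second argument is not handled):

assert greatest_common_divisor(48, 18) == 6
assert greatest_common_divisor(-21, 14) == 7   # negatives handled via abs()
assert greatest_common_divisor(5, 15) == 5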
8a9eb15c5e719dc124cc7c21bce6870d64d675e0
72,362
from typing import Tuple


def get_rgb_from_hex(hex_str: str) -> Tuple[int, int, int]:
    """Get the rgb values from a hex string."""
    # Adopted from
    # https://stackoverflow.com/questions/29643352/converting-hex-to-rgb-value-in-python
    hx = hex_str.lstrip("#")
    return int(hx[0:2], 16), int(hx[2:4], 16), int(hx[4:6], 16)
4c46249d23852f2e1dcb7a683d72677d447fd349
72,364
def convert_to_face_coords(face_bbox, leye_bbox, reye_bbox):
    """Convert the left and right eye bounding boxes to be referenced relative
    to the face crop instead of the raw image.

    Args:
        face_bbox: The face bounding box.
        leye_bbox: The raw left eye bounding box.
        reye_bbox: The raw right eye bounding box.

    Returns:
        The left and right eye bounding boxes, referenced to the face
        bounding box.
    """
    leye_bbox[0] -= face_bbox[0]
    leye_bbox[1] -= face_bbox[1]
    reye_bbox[0] -= face_bbox[0]
    reye_bbox[1] -= face_bbox[1]
    return (leye_bbox, reye_bbox)
356a958fc4c5d0f96f8f34781796288570812b24
72,366
def convert_nonelike_to_none(input_item):
    """Converts None-like values ("none", "NULL", None, etc) to the uniform
    string "None". Note, the output is NOT the python None, but a string.

    Parameters
    ----------
    input_item : string or int
        Item to be converted to None (e.g. "none", "NULL" or the equivalent
        in several languages)

    Returns
    -------
    return_value : string
        If input_item is None-like, returns python string "None".
        Otherwise, returns the input_item.

    Usage
    -------
    # convert a single value or string
    convert_nonelike_to_none("none")
    # convert a column in a pandas DataFrame
    df["column_name"] = df["column_name"].apply(convert_nonelike_to_none)
    """
    list_None_items = [None, "none", "NONE", "null", "NULL",
                       'Nijedna', 'Cap', 'Niti', 'Ingen', 'Geen', 'Aucun',
                       'Keine', 'Okenn', 'Egyik', 'Tidak', 'Nessuno',
                       'Hakuna', 'pagh', 'Neviens', 'Tiada', 'L-eda', 'Mix',
                       'Ingen', 'Ninguno', 'Brak', 'Nenhum', 'Nici', 'Niko',
                       'Nobena', 'Ninguno', 'Ingen', 'Dim',
                       'NIJEDNA', 'CAP', 'NITI', 'INGEN', 'GEEN', 'AUCUN',
                       'KEINE', 'OKENN', 'EGYIK', 'TIDAK', 'NESSUNO',
                       'HAKUNA', 'PAGH', 'NEVIENS', 'TIADA', 'L-EDA', 'MIX',
                       'INGEN', 'NINGUNO', 'BRAK', 'NENHUM', 'NICI', 'NIKO',
                       'NOBENA', 'NINGUNO', 'INGEN', 'DIM']

    # determine if input_item is in the list
    input_item_is_None = input_item in list_None_items

    # define the return_value as either the string "None" or the original input item
    return_value = "None" if input_item_is_None else input_item

    return return_value
5f57dd07d15c7b753f5a36d2106a4bc5d82994d8
72,373
def _sort_run_times(x, show_time=True):
    """Sort runs based on acquisition times"""
    ordered_runs = sorted(x, key=lambda x: x[2])
    if show_time:
        return dict([(i[0], i[1]) for i in ordered_runs])
    else:
        return [i[0] for i in ordered_runs]
b3c78e1b978b668cdd0062dcc974789ddc5613c7
72,376
def sanitize_filename(filename):
    """Sanitize a filename, removing dangerous characters that could be used
    to escape the uploads directory.

    Everything except [A-Z a-z 0-9 - _ .] is stripped, along with the
    pattern '..' which in *nix systems represents going up a directory
    in the filepath.
    """
    # Collapse '..' repeatedly so sequences like '...' cannot survive a
    # single-pass replacement.
    while ".." in filename:
        filename = filename.replace("..", ".")
    for character in filename:
        if character not in ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                             "abcdefghijklmnopqrstuvwxyz"
                             "0123456789"
                             "._-"):
            filename = filename.replace(character, "")
    return filename
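Example behaviour on invented inputs; note the slashes are stripped, so the surviving dots can no longer form a path traversal:

assert sanitize_filename("../../etc/passwd") == "..etcpasswd"
assert sanitize_filename("report (final).pdf") == "reportfinal.pdf"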
469fd6bbeb245fe1eac8c837000c4c72df4dc430
72,380
def get_shell_cmd(cmd_object):
    """Convert a plumbum cmd to a shell expression"""
    return ' '.join(cmd_object.formulate(10))
a32011931de12a546f38bb39bc2dbc73e3202d50
72,382
def get_fast_weights(updates, initialization):
    """Compute task-specific weights

    Takes initialization parameters and task-specific weight updates and
    creates the corresponding, new (fast) weights.

    Parameters
    ----------
    updates : torch.Tensor
        Updates proposed for parameters. Shape = (#params,1)
    initialization : list
        List of initialization parameters. Every element in the list is a
        torch.Tensor.

    Returns
    ----------
    fast_weights
        Task-specific weights obtained by computing initialization + updates
    """
    fast_weights = []
    lb = 0
    ub = 0
    for lid, l in enumerate(initialization):
        num_els = l.numel()
        ub += num_els
        fast_weights.append(l + updates[lb:ub].reshape(l.size()))
        lb += num_els
    return fast_weights
08ec9441332050282da3dee5653c1e10442a1fa0
72,383
import shutil


def dependency_check(dependency: str) -> bool:
    """
    Checks if a given program is present in the user's $PATH

    :param dependency: String of program name
    :return: True if program is in $PATH, False if not
    """
    return shutil.which(dependency) is not None
0b43ca17df80519e553b08bb746254ce2b8b4903
72,384
def avg(ts):
    """Calculates the average of the timeseries.

    Args:
        ts: A timeseries list of [time, value].

    Returns:
        Average of the timeseries.
    """
    return sum([float(v[1]) for v in ts]) / len(ts)
b853d70841bf064a4f01f846469d76f9110739d4
72,386
def client_superuser(client, admin):
    """Provides a client with a logged in superuser."""
    client.force_login(admin)
    return client
2433b52f5e9dbaedf73195d77e9015be158a262e
72,387
import csv


def read_path2port_map(filename):
    """
    Reads csv file containing two columns: column1 is path name, column2 is
    port number

    :param filename:
    :return: dictionary with port as key and path as value
    """
    res = {}
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            res[row[1]] = row[0]
    return res
39a317c21d66c544bd50576493cb1e50dbc5b6bf
72,391
def parse_player(player):
    """Parse an input string for the first player

    Args:
        player (string): String to be parsed (eg, X)

    Returns:
        (string/None): The player if valid or None
    """
    if player.lower() == "x":
        return "X"
    elif player.lower() == "o":
        return "O"
    else:
        print("Player must be X or O")
        return None
a752b992d9edbc3ae65e17951635e6b52387f51f
72,396
import re


def check_operator_spacing_around(code, operator):
    """Check for correct spacing around the given operator in the line.

    There should be exactly one space on either side of the given operator.
    Notice that operators `*` and `&` don't quite follow this rule, since
    we're okay with `Foo* foo` or `Foo *foo` for pointers, as long as
    they're consistent about it.

    :param str code: The line of code to check.
    :param str operator: The operator to check, such as "+"
    :returns: The column number of the inconsistent operator, or `None`
        otherwise. Notice that the column number may be `0`, so you must not
        check for falsiness, but rather check that `result is not None`.
    :rtype: int or None
    """
    operator_regex = re.compile(r"""
        (?P<code_left>\S+)
        (?P<whitespace_left>\s*)
        (?P<operator>{operator})
        (?P<whitespace_right>\s*)
        (?P<code_right>\S+)
    """.format(
        operator=re.escape(operator)
    ), re.VERBOSE)

    whitespace_groups = ["whitespace_left", "whitespace_right"]
    for match in operator_regex.finditer(code):
        for group in whitespace_groups:
            if match.group(group) != " ":
                return match.start("operator")
    return None
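Two illustrative calls against invented lines of code, one consistently spaced and one not:

assert check_operator_spacing_around("a + b", "+") is None  # correct spacing
assert check_operator_spacing_around("a +b", "+") == 2      # missing right-hand space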
8ac388446e039d0cca34163448d50c42b01452dd
72,397
def check_keys(model, pretrained_state_dict):
    """Validate that the pre-trained model has all the parameters needed by the model"""
    ckpt_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    used_pretrained_keys = model_keys & ckpt_keys
    unused_pretrained_keys = ckpt_keys - model_keys
    missing_keys = model_keys - ckpt_keys
    print('Missing keys:{}'.format(len(missing_keys)))
    print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
    print('Used keys:{}'.format(len(used_pretrained_keys)))
    assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
    return True
07c008b25577e8763316d3ffe5350e6122674284
72,399
def standardize(X, mean_ctr=True, with_std=True, axis=-1, copy=True):
    """
    Standardization of data

    Parameters
    ----------
    X : ndarray
        Data array
    mean_ctr : bool
        Mean-center data
    with_std : bool
        Normalize by the standard deviation of the data
    axis : int
        Axis from which to calculate mean and standard deviation
    copy : bool
        Copy data (X) if True, overwrite if False
    """
    if copy:
        Xsc = 1 * X
    else:
        Xsc = X

    if mean_ctr:
        Xsc -= X.mean(axis=axis, keepdims=True)
    if with_std:
        Xsc /= X.std(axis=axis, keepdims=True)

    return Xsc
a56960ad0343beef2e1eebdee975018a40b23354
72,406
import warnings
from typing import Callable, Union

import torch


def do_activation(input_data: torch.Tensor,
                  activation: Union[str, Callable] = "softmax") -> torch.Tensor:
    """
    This function is used to do activation for inputs.

    Args:
        input_data: the input that to be activated, in the shape [B] or [BN]
            or [BNHW] or [BNHWD].
        activation: can be ``"sigmoid"`` or ``"softmax"``, or a callable
            function. Defaults to ``"softmax"``. An example for callable
            function: ``activation = lambda x: torch.log_softmax(x)``.

    Raises:
        NotImplementedError: When input an activation name that is not implemented.
    """
    input_ndim = input_data.ndimension()
    if activation == "softmax":
        if input_ndim == 1:
            warnings.warn("input_data has only one channel, softmax ignored.")
        else:
            input_data = input_data.float().softmax(dim=1)
    elif activation == "sigmoid":
        input_data = input_data.float().sigmoid()
    elif callable(activation):
        input_data = activation(input_data)
    else:
        raise NotImplementedError("activation can only be sigmoid, softmax or a callable function.")
    return input_data
dfdcf3af2aed6adcfddfb007a29a26865c1d2dc6
72,411
def processCEF(assembly_model_substring):
    """This function parses a substring of the assembly model string and
    provides back information on the modularity and planarity."""
    assembly_model_substring = assembly_model_substring[1:]
    nummod = assembly_model_substring.split('X')
    num = int(nummod[0])
    mod = int(nummod[1])
    return num, mod
775495132f1fd431139cf7a25127d3381afba55d
72,418
def prev(space, w_arr):
    """Rewind the internal array pointer"""
    length = w_arr.arraylen()
    current_idx = min(w_arr.current_idx, length) - 1
    if current_idx < 0:
        return space.w_False
    w_arr.current_idx = current_idx
    return w_arr._current(space)
89186391e6ce0a8985f051b6c010e51aaf239c7a
72,419
def get_default_chunksize(length, num_splits):
    """Creates the most equal chunksize possible based on length and number of splits.

    Args:
        length: The integer length to split (number of rows/columns).
        num_splits: The integer number of splits.

    Returns:
        An integer chunksize.
    """
    return (
        length // num_splits
        if length % num_splits == 0
        else length // num_splits + 1
    )
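For example, with arbitrary numbers:

assert get_default_chunksize(16, 4) == 4   # divides evenly
assert get_default_chunksize(15, 4) == 4   # 15 // 4 + 1, rounds up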
28b13c6140a5685aea75dd7eaaefbd5a7b63b072
72,422
def get_flow_info(http_flow):
    """
    Get the TCP, IP and Meta information of the flow.

    :param http_flow: http flow object
    :return: tcp (src,dst ports), ip (src, dst addresses)
    """
    return {
        "TcpSrcPort": http_flow["TCP"].srcport,
        "TcpDstPort": http_flow["TCP"].dstport,
        "IpSrc": http_flow["IP"].src,
        "IpDst": http_flow["IP"].dst,
        "MetaSniffTimeStamp": http_flow.sniff_timestamp,
        "ResultIndex": http_flow.frame_info.number
    }
71d3f5f6655a91a0b135c18f1369415cd29531f4
72,425
def to_dict(_object, **extra_keys):
    """
    A utility to convert an object with attributes to a dictionary with the
    optional feature of slapping extra_keys.

    Because extra_keys can be optionally set, it is assumed that any keys
    that clash will get overwritten.

    Private methods (anything that starts with `_`) are ignored.
    """
    dictified_obj = {}
    for k, v in _object.__dict__.items():
        if not k.startswith('_'):
            # get key
            value = extra_keys.pop(k, v)
            dictified_obj[k] = value
    if extra_keys:
        for k, v in extra_keys.items():
            dictified_obj[k] = v
    return dictified_obj
5a3204273046f36766f4974dcff725c3f8f27873
72,426
def get_rna_types_from(xrefs, name):
    """
    Determine the rna_types as annotated by some database.

    Parameters
    ----------
    xrefs : iterable
        The list of xrefs to filter to extract the rna types from.
    name : str
        The name of the database to use.

    Returns
    -------
    rna_types : set
        A set of rna types that is annotated by the given database.
    """
    rna_types = set()
    for xref in xrefs:
        if xref.db.name == name:
            rna_types.add(xref.accession.get_rna_type())
    return rna_types
23872b14e8b7555eeda7b807ee2831c00c71665b
72,428
import requests


def get_http_status_code(url: str) -> int:
    """This gets the http status code of a web resource.

    Parameters
    ----------
    url : string
        The url which the http status code will try and obtain.

    Return
    ------
    status_code : int
        The status code.
    """
    web_request = requests.get(url)
    status_code = web_request.status_code
    return status_code
91765c364076ac32cb717e3285c654ef938af5fb
72,429
def filter_company(job):
    """Filter out company name."""
    company_container = job.find('div', attrs={'class': '-name'})
    company_name = company_container.get_text().strip()
    return company_name
fc6f8215fdb378d6be59be786e2bf92e1e6292d0
72,430
def float_from_sat(amount: int) -> float:
    """Return the float monetary value equivalent of a satoshi amount."""
    return float(amount / 1e8)
d0335d0d71695c8038510605707386c92f0631cb
72,437
def prepare_dataframe(kraken_translate_report_fp, taxonomic_rank,
                      taxa_levels, taxa_levels_idx):
    """Return sets for sample IDs and taxonomy strings.

    Parameters
    ----------
    kraken_translate_report_fp: str
        filepath to output of "kraken translate"
    taxonomic_rank: str
        taxonomy level (e.g., genus or species)
    taxa_levels: dict
        keys are full name taxonomic ranks and values are abbreviated ranks
    taxa_levels_idx: dict
        2-way dict storing integer depths for abbreviated taxonomic ranks

    Returns
    -------
    sample_ids: list
        all unique sample IDs in file
    taxonomies: list
        all unique taxonomies in file
    """
    total_levels = len(taxa_levels)
    taxonomic_rank_level_str = taxa_levels[taxonomic_rank]
    taxonomic_rank_level_int = taxa_levels_idx[taxonomic_rank_level_str]
    if taxonomic_rank_level_int < 6:
        split_on_level = taxa_levels_idx[str(taxonomic_rank_level_int + 1)]
    else:
        # keep full string (to species level)
        split_on_level = '\t'
    sample_ids = set()
    taxonomies = set()
    with open(kraken_translate_report_fp) as kraken_translate_report_f:
        for line in kraken_translate_report_f:
            label, taxonomy = line.strip().split('\t')
            sample_id = label.split('_')[0]
            sample_ids.add(sample_id)
            # record abundance
            if taxonomic_rank_level_str in taxonomy:
                # keep taxonomy string up to specified level
                taxonomy = taxonomy.split(split_on_level)[0]
                taxonomies.add(taxonomy)
    return list(sample_ids), list(taxonomies)
6a2e5bad8e15608c83cd2e176425a4e3d69a7d07
72,444
def compute_in_degrees(digraph):
    """
    Takes a directed graph digraph (represented as a dictionary) and
    computes the in-degrees for the nodes in the graph
    """
    nodes = digraph.keys()
    counts = {x: 0 for x in nodes}
    for key in digraph.keys():
        values = digraph[key]
        for value in values:
            counts[value] += 1
    return counts
c07fbaed0b95c78ad9cffbd64719ec160368a65f
72,446
import time


def time_to_epoch(t):
    """
    A simple utility to turn a datetime object into a timestamp

    :param t: datetime object
    :return: integer
    """
    return int(time.mktime(t.timetuple()))
20dd193c564fa99861de252f8a3d0fdbd2782359
72,450
def truncate_directory(directory: str) -> str:
    """Simple truncation of a given directory path as string."""
    if len(directory) > 50:
        split_directory = directory.split('/')
        # keep only the last five path components
        prefix_truncated = '/'.join(split_directory[len(split_directory) - 5:])
        return '.../' + prefix_truncated
    else:
        return directory
a9d4c1958096be181b67983711907b0d15cfa2c7
72,451
import math


def binary_entropy(p):
    """Entropy of a Bernoulli trial (in nats); requires 0 < p < 1."""
    return -p * math.log(p) - (1 - p) * math.log(1 - p)
184ad982248e11983cfb4576a03f5db48b48d61b
72,452
def _process_simple(doc, tag, tag_value):
    """
    Generate node for simple types (int, str)

    @param doc: xml doc
    @param tag: tag
    @param tag_value: tag value
    @return: node
    """
    node = doc.createElement(tag)
    node.appendChild(doc.createTextNode(str(tag_value)))
    return node
9c44586c86f975da310a9034354eb9ca1d136d2c
72,453
def smart_selection(labs, to_select, how='any', val=1):
    """
    Simplify selection of a single or multiple groups in a pandas dataframe.

    Args:
        labs (pandas dataframe): one-hot-encoded classes membership dataframe
            with samples as rows and classes as columns.
        to_select (list of int or strings): the groups to be selected.
        how (string): selection type, samples are chosen if they belong to
            'any' or 'all' classes (default 'any').
        val (int): selection value, samples are chosen if their value in the
            classes membership dataframe corresponds to this (default 1).

    Returns:
        (pandas series): boolean series with the corresponding selection.
    """
    if isinstance(to_select, list):
        if how == 'any':
            return (labs[to_select] == val).any(axis=1)
        elif how == 'all':
            return (labs[to_select] == val).all(axis=1)
        else:
            return -1
    else:
        return labs[to_select] == val
49534e60d4e7c90b0763377c03b21a0340f27c03
72,456
import math


def get_Rost_ID_threshold(L, n=0):
    """
    This function returns the Rost sequence identity threshold for a given
    alignment of length "L".

    @input:
        L {int} alignment length
        n {int} parameter in the curve (if > 0 more strict)
    @return: {float}
    """
    # Equivalent to the original expression: the float(repr(...)) round-trips
    # are identity operations in Python 3 and have been removed.
    return n + 480 * pow(L, -0.32 * (1 + pow(math.e, -float(L) / 1000)))
e9d5bc41ea40acef751aadac30951a16884c6ed3
72,458
def truncate_roi(orig_roi, src_image_size):
    """
    Returns truncated ROI for source and destination images.

    Crops ROI so that image edges are handled correctly.
    """
    # Set x position of ROI
    if orig_roi[0] < 0:
        src_x = 0
        dst_x = -orig_roi[0]
        w = orig_roi[2] + orig_roi[0]
    else:
        src_x = orig_roi[0]
        dst_x = 0
        w = orig_roi[2]

    # Set y position of ROI
    if orig_roi[1] < 0:
        src_y = 0
        dst_y = -orig_roi[1]
        h = orig_roi[3] + orig_roi[1]
    else:
        src_y = orig_roi[1]
        dst_y = 0
        h = orig_roi[3]

    # Set width of ROI
    if (src_x + w) >= src_image_size[0]:
        w = src_image_size[0] - src_x - 1

    # Set height of ROI
    if (src_y + h) >= src_image_size[1]:
        h = src_image_size[1] - src_y - 1

    # Create source and destination image ROIs
    src_roi = src_x, src_y, w, h
    dst_roi = dst_x, dst_y, w, h

    return src_roi, dst_roi
ce5699a8771585ebffa7470287143e89771c5b25
72,459
import re


def count_string_characters(string):
    """Count number of characters as displayed in string.

    "" -> 0 characters
    "abc" -> 3 characters
    "aaa\"aaa" -> 7 characters
    "\x27" -> 1 character
    """
    string = string[1:-1].replace(r'\\', '_').replace(r'\"', '"')
    regex = r'\\x[0-9a-f]{2}'
    return len(re.sub(regex, '_', string))
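Worked examples matching the docstring; the argument includes the surrounding quotes, which are stripped first:

assert count_string_characters('""') == 0
assert count_string_characters('"abc"') == 3
assert count_string_characters(r'"\x27"') == 1  # one escaped character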
9484a5c9bf3a7a503d09006d76f83922323ac216
72,460
def parse_courses(courses):
    """
    Parse information of all courses that are retrieved from Moodle.

    :param courses: Statements for all courses.
    :type courses: list(dict(str, int))
    :return: A list of dictionaries containing the names and ids of all courses.
    :rtype: list(dict(str, int))
    """
    courses = courses[:]
    del courses[0]
    course_info = [{"courseId": course['id'], "name": course['fullname']}
                   for course in courses]
    return course_info
3bad5f4633a4e5cfbdac7fb599fc8bbced72da12
72,462
def _get_shape_str(shape):
    """Convert image shape to string for filename description"""
    return '{}_{}'.format(*shape[:2])
b290f415bc5c92c000e5955232f197bbc5cd91e8
72,464
from io import StringIO


def print_snapshots(ec2, region):
    """
    Print a list of snapshots

    :param ec2: The boto3 ec2 client
    :param region: The region to search in
    :return: The nicely formatted list of snapshots to print
    """
    snapshots = ec2.describe_snapshots(OwnerIds=['self'])
    snapshot_list = snapshots['Snapshots']
    found = 0
    fp = StringIO()
    fp.write('\n')
    fp.write('Snapshots in %s\n' % region)
    fp.write('----------------------------------\n')
    for snapshot in snapshot_list:
        snapshot_id = snapshot['SnapshotId']
        snapshot_start_time = snapshot['StartTime'].strftime("%Y-%m-%d")
        fp.write(str(snapshot_id))
        fp.write(' ')
        fp.write(str(snapshot_start_time))
        fp.write('\n')
        found += 1
    sn_out = fp.getvalue()
    fp.close()
    if not found:
        return ""
    return sn_out
e7d5167e5df20c3e6d41ccf791f22f6edf77799d
72,469
def is_prime(n):
    """
    Return True if n is prime, false otherwise.

    >>> is_prime(1)
    False
    >>> is_prime(2)
    True
    >>> is_prime(19)
    True
    """
    if n < 2:
        # Covers 0, 1 and negatives, which descending trial division alone
        # would wrongly report as prime.
        return False
    i = n - 1
    while i > 1:
        if n % i == 0:
            return False
        i -= 1
    return True
baf0d24cae6858b1d7c42b750a4849ba866eb4c6
72,471
def longestCommonPrefix(*sequences):
    """
    Returns longest common prefix occurring in given sequences

    Reference: http://boredzo.org/blog/archives/2007-01-06/longest-common-prefix-in-python-2

    >>> longestCommonPrefix('foobar', 'fobar')
    'fo'
    """
    if len(sequences) == 1:
        return sequences[0]

    # sort by length so the shortest sequence is compared against the rest
    sequences = [pair[1] for pair in sorted((len(fi), fi) for fi in sequences)]

    if not sequences:
        return None

    for i, comparison_ch in enumerate(sequences[0]):
        for fi in sequences[1:]:
            ch = fi[i]
            if ch != comparison_ch:
                return fi[:i]

    return sequences[0]
a7363e7bb42ce99084419eb6e227a5a051856155
72,472
def get_full_date(date):
    """Returns a string like 'November 27, 2009'"""
    return date.strftime("%B %d, %Y")
823c7802a68d575a6ab39f4cbe352a9fb0fe0514
72,474
def eliminate_prefix(v, u):
    """
    Removes prefix u from the given input word v.
    If v = uw (u=prefix, w=suffix), then w = u^-1 v.

    Parameters:
    -----------------------------------
    v: str
        A (sub)word
    u: str
        The prefix to remove

    Returns:
    -----------------------------------
    w : str
        (sub)word with the prefix removed
    """
    # str.lstrip(u) strips any leading *characters* found in u, not the
    # prefix u itself, so test and slice explicitly instead.
    if v.startswith(u):
        return v[len(u):]
    return v
c17d69672e205267a1917b35ad30117300073522
72,485
def get_hypercube_interpolation_fn(coefficients):
    """Returns function which does hypercube interpolation.

    This is only for 2^d lattice aka hypercube.

    Args:
        coefficients: coefficients of hypercube ordered according to index
            of corresponding vertex.

    Returns:
        Function which takes d-dimension point and performs hypercube
        interpolation with given coefficients.
    """
    def hypercube_interpolation_fn(x):
        """Does hypercube interpolation."""
        if 2**len(x) != len(coefficients):
            raise ValueError("Number of coefficients(%d) does not correspond to "
                             "dimension 'x'(%s)" % (len(coefficients), x))
        result = 0.0
        for coefficient_index in range(len(coefficients)):
            weight = 1.0
            for input_dimension in range(len(x)):
                if coefficient_index & (1 << input_dimension):
                    # If statement checks whether 'input_dimension' bit of
                    # 'coefficient_index' is set to 1.
                    weight *= x[input_dimension]
                else:
                    weight *= (1.0 - x[input_dimension])
            result += coefficients[coefficient_index] * weight
        return result

    return hypercube_interpolation_fn
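For d = 2 this reduces to bilinear interpolation; a small check with coefficients at the four unit-square corners (vertices ordered by binary index, where bit i of the index gives coordinate x[i]):

f = get_hypercube_interpolation_fn([0.0, 1.0, 2.0, 3.0])
assert f([0.0, 0.0]) == 0.0   # vertex with index 0
assert f([1.0, 1.0]) == 3.0   # vertex with index 3
assert f([0.5, 0.5]) == 1.5   # centre = average of the four corners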
7db89e0c896ee03065adcc61cee235f10ddf7882
72,486
def bookValue(firm, date, pdatabase, printWarnings=True, bookUnit=1000000):
    """
    Firm's book value as of latest quarterly earnings report preceding date

    Relies on naming of pdatabase (compustat) and chronological data for
    each firm

    Returns -1 if no data available

    Returns FLOAT
    """
    bookQuery = pdatabase.data.query(
        '(tic == "' + firm + '") & (datadate < ' + date + ')')
    # print(bookQuery)
    if bookQuery.empty:
        if printWarnings:
            print("NO BOOK DATA: " + firm + ", " + date)
        return -1
    bookSeries = bookQuery["ceqq"]
    return float(bookSeries.iat[bookSeries.size - 1] * bookUnit)
395e43bc920fd5dbe78c2d9be738192cc750570a
72,490
def float_range(x, low, high, strong=True):
    """
    Check if x is between low and high values.
    It works for int and float ranges.
    """
    if strong:
        result = low <= x <= high
    else:
        result = low < x < high
    return result
4dcf957a74f3ddf07ca2768a611d5f8aaf5099a4
72,492
import torch


def resolvent(A, B, C, z):
    """
    A: (... N N)
    B: (... N)
    C: (... N)
    z: (... L)
    returns: (... L)

    represents C (z - A)^{-1} B
    """
    if A.is_complex() or B.is_complex() or C.is_complex() or z.is_complex():
        dtype = torch.cfloat
    else:
        dtype = torch.float
    N = A.shape[-1]
    I = torch.eye(N).to(device=A.device, dtype=dtype)
    A_ = I * z[..., None, None] - A.unsqueeze(-3)
    B = B.to(dtype)[..., None, :, None]  # (... L N 1)
    r = torch.linalg.solve(A_, B).squeeze(-1)  # (... L N)
    r = torch.sum(r * C.to(dtype).unsqueeze(-2), dim=-1)
    return r
b7e92af5c803042e0c2625f2b03aeed335091058
72,493
def find_in_list_by_name(some_list: list, name: str):
    """
    Helper created to find ModelField by required name

    :param some_list: list of objects with name field
    :param name: name of object to be found
    :raises ValueError: not found
    :return: object with required name
    """
    for item in some_list:
        if item.name == name:
            return item
    raise ValueError(f"List: {some_list} doesn't have this name: {name}")
b1f6450f180121ad9bcaf568eacd03990dc46972
72,496
def cmp(a, b):
    """Equivalent of python 2 cmp function for python 3"""
    return (a > b) - (a < b)
ce93052a446d0296ae53bbeed81463f31a23f0d0
72,497
def str2bool(txt):
    """
    Convert a string to a boolean

    :param txt: string object
    :return: boolean
    """
    if txt.lower() in ['1', 'true', 'yes', 'y']:
        return True
    elif txt.lower() in ['0', 'false', 'no', 'n']:
        return False
    else:
        raise ValueError("Can't convert \"{}\" to a boolean".format(txt))
dfa06d1908b56f58c423a3b9ed6e1748d37e81e1
72,498
def isChildDir(parent: str, cand: str) -> bool:
    """
    Returns true if parent is an ancestor of cand.
    """
    # Require a path-separator boundary: a plain prefix check would wrongly
    # treat '/foobar' as a child of '/foo'.
    if not parent.endswith('/'):
        parent += '/'
    return cand.startswith(parent) and len(cand) > len(parent)
7468d4981ef8d539efd94ee5c6629d5cd95e5b69
72,502
def add_arg(p, *args, **kwds):
    """Add an argument or option to a parser

    Given an arbitrary parser instance, adds a new option or argument using
    the appropriate method call and passing the supplied arguments and
    keywords.

    For example, if the parser is an instance of argparse.ArgumentParser,
    then the 'add_argument' method will be invoked to add a new argument to
    the parser.

    Arguments:
        p (Object): parser instance
        args (List): list of argument values to pass directly to the
            argument-addition method
        kwds (mapping): keyword-value mapping to pass directly to the
            argument-addition method
    """
    for add_arg in ('add_argument', 'add_option',):
        try:
            return getattr(p, add_arg)(*args, **kwds)
        except AttributeError:
            pass
    raise Exception("Unrecognised subparser class")
0ae10e68e18676045c06d7fc5324af4206f4c7b5
72,506
def x_point_mid(df_agg):
    """Set the x_point to be mid-way between x_left and x_right"""
    res = df_agg.assign(
        x_point=lambda df: (df['x_left'] + df['x_right']) / 2.
    )
    return res
9410c1806b71c3097063c7cc628a69cf4e63aa08
72,507
def inner_fidget(*args, **kwargs):
    """
    define a ValidWidget class as an idiomatic inner widget

    :param args: arguments forwarded to the template
    :param kwargs: arguments forwarded to the template
    """
    def ret(cls):
        cls.__is_inner__ = cls.template(*args, **kwargs)
        return cls
    return ret
a2d0be0afb1f62d5f8243fcc5c7427c8477c0ddd
72,508
def get_chunk_handlabels(handlabels_list):
    """Get all of the hand-labels from all of the 'labels' tiers from the
    handlabels_list

    Parameters
    ----------
    handlabels_list : list
        List of all Dictionaries containing all of the handlabels for each
        chunk for one day
        shape: [chunk_num] -> {'labels' : [labels, onsets],
                               'female' : [labels, onsets],
                               'KWE' : [time],
                               'old_epochs' : {'epoch_#' : [labels, onsets]}  # Only if Labeled the old way
                               }

    Returns
    -------
    labels_list : list
        list of labels for all epochs for one day
        [Chunks] -> [Labels]
    onsets_list : list
        list of start and end times for all labels for one day
        [[Chunks]->[Start Time], [Chunks]->[End Time]]
    """
    labels_list = []
    starts_list = []
    ends_list = []

    for chunk in handlabels_list:
        labels, [starts, ends] = chunk['labels']  # Get label components
        labels_list.append(labels)
        starts_list.append(starts)
        ends_list.append(ends)

    onsets_list = [starts_list, ends_list]

    return labels_list, onsets_list
c2c5845e7813baf27b37ed0cd7ae4c856414b370
72,514
def read_embedding_set(args):
    """Reads a nodelist with vertices to be embedded."""
    vertices = set()
    with open(args.embed_subset, 'r') as f:
        for line in f:
            vertex = int(line.strip())
            vertices.add(vertex)
    return list(vertices)
06401cd3581ccfc1f406ac9c7833381c85865497
72,515
def entity_char_to_token(entity, sentence):
    """Takes Stanza entity and sentence objects and returns the start and end
    tokens for the entity.

    The misc value in a stanza sentence object contains a string with
    additional information, separated by a pipe character. This string
    contains the start_char and end_char for each token, along with other
    information. This is extracted and used to match the start_char and
    end_char values in a span object to return the start and end tokens for
    the entity.

    Example entity:
        {'text': 'Barack Obama', 'type': 'PERSON', 'start_char': 0, 'end_char': 13}

    Example sentence:
        [
            {'id': 1, 'text': 'Barack', ..., 'misc': 'start_char=0|end_char=7'},
            {'id': 2, 'text': 'Obama', ..., 'misc': 'start_char=8|end_char=13'}
        ]

    Args:
        entity: Stanza Span object
        sentence: Stanza Sentence object

    Returns:
        Returns the token index of start and end locations for the entity
    """
    start_token, end_token = None, None
    for i, v in enumerate(sentence.words):
        x = v.misc.split("|")
        if "start_char=" + str(entity.start_char) in x:
            start_token = i
        if "end_char=" + str(entity.end_char) in x:
            end_token = i + 1
    return start_token, end_token
9f41843c660614e3af4bc67b25042a399029dfda
72,519
def sort_key(item):
    """Case-insensitive sorting."""
    return item['name'].lower()
ccde3887d7eaa7d4cd1c1a44fa547943e505062f
72,521
def smooth_color_func(niter, func):
    """Version of :func:`smooth_color` that accepts a function. Can be used
    to pre-calculate a color list outside of a loop.

    Parameters
    ----------
    niter : int
        number of iterations
    func : callable

    Examples
    --------
    >>> from pwtools import mpl
    >>> mpl.smooth_color_func(3, lambda z: (z,0,1-z))
    [(0.0, 0, 1.0), (0.5, 0, 0.5), (1.0, 0, 0.0)]
    >>> for ii,color in enumerate(mpl.smooth_color_func(10, lambda z: (z,0,1-z))):
    ...     plot(rand(20)+ii, color=color)
    """
    col = []
    fniter = float(niter) - 1
    for ii in range(niter):
        z = float(ii) / fniter
        col.append(func(z))
    return col
87bc0d17e01820ee6b9c5dbf6454128053b9c3e1
72,524
def lazy_reverse_binmap(f, xs):
    """
    Same as lazy_binmap, except the parameters are flipped for the binary
    function
    """
    return (f(y, x) for x, y in zip(xs, xs[1:]))
1a2a982aa9aa69729ec31b57096b1c6fd34c4199
72,528
def get_syntax(command, fn, command_prefix):
    """
    Read back the syntax argument provided in a command's wrapper. Return it
    in a printable format.

    :param command: Command being called.
    :param fn: Function which the command is wrapped around.
    :param command_prefix: Prefix used for commands in chat.
    :return: String. Syntax details of the target command.
    """
    return "Syntax: {}{} {!s}".format(
        command_prefix,
        command,
        fn.syntax)
2780b883bc067805744baa9163ba3ad7786490c3
72,534
def nucleotides(data, start, end):
    """
    :param data: list of string of length 50 nucleotides
    :param start: coordinate 0 index
    :param end: coordinate 0 index
    :return: sequence of nucleotide at chr:start-end
    """
    def nucleotide(index):
        # the +1 offset skips the first entry of data (e.g. a header line)
        return data[(index // 50) + 1][index % 50].upper()

    sequence = "".join([nucleotide(index) for index in range(start, end)])
    return sequence
9c2037d07dcb92433380565c37d064137925fa05
72,541
def parse_tlpdb_to_dict(tlpdb_path):
    """Reads given tlpdb database and creates dict with packages, their
    dependencies and files
    """
    with open(tlpdb_path, "r") as f:
        packages = f.read().split("\n\n")

    pkg_dict = {}
    for pkg in packages:
        if not pkg == "":
            pkg_lines = pkg.split("\n")
            pkg_name = pkg_lines[0].split(" ")[1]
            # We only care about files and depends
            pkg_dict[pkg_name] = {"depends": [], "files": []}
            i = 0
            while i < len(pkg_lines):
                if pkg_lines[i].split(" ")[0].startswith("runfiles"):
                    # Start of file list
                    i += 1
                    while i < len(pkg_lines) and pkg_lines[i].startswith(" "):
                        # files start with a space, for example
                        # " texmf-dist/tex/latex/collref/collref.sty"
                        pkg_dict[pkg_name]["files"].append(pkg_lines[i].split(" ")[1])
                        i += 1
                if i == len(pkg_lines):
                    break
                if pkg_lines[i].split(" ")[0] == "depend":
                    pkg_dict[pkg_name]["depends"].append(pkg_lines[i].split(" ")[1])
                i += 1
    return pkg_dict
c3fb4905a71f98d48dae11e71a7380ec4feeb69d
72,542
def floor_pow2(n):
    """Return the exponent k of the largest power of 2 that's <= n,
    i.e. floor(log2(n))."""
    k = 0
    n >>= 1
    while n:
        k += 1
        n >>= 1
    return k
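Examples; note the function returns the exponent, not the power itself:

assert floor_pow2(1) == 0    # 2**0 <= 1
assert floor_pow2(16) == 4   # 2**4 == 16
assert floor_pow2(17) == 4   # 2**4 <= 17 < 2**5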
cf3769cab22ecf80c915b72cc0854f149c9e4407
72,543
def get_channel_names(stream):
    """
    Extract channel names from an xdf stream.

    :param stream: dictionary
        xdf stream to parse
    :return: list
        list of channel names
    """
    try:
        return [
            channel_info["label"][0]
            for channel_info in stream["info"]["desc"][0]["channels"][0]["channel"]
        ]
    except TypeError:
        print("Warning : Channel description is empty")
        return None
    except IndexError:
        print("Warning : No channel names found")
        return None
da8038cc6f676616518f64e83c4f8889473b046e
72,546
import sqlite3


def connect_to_db(local=True):
    """Initialize connection to local sqlite3 database."""
    con = None
    if local:
        # Create a SQL connection to our SQLite database
        con = sqlite3.connect("./db/tableTennisData.db")
    return con
8ac48777680e7ff63df856bb61e2db8d3cef6b61
72,548
def pl2_to_pl(src_dict, scale=1000.):
    """Convert integral flux of PL2 to prefactor of PL"""
    index = src_dict['spectral_pars']['Index']['value']
    emin = src_dict['spectral_pars']['LowerLimit']['value']
    emax = src_dict['spectral_pars']['UpperLimit']['value']
    f = src_dict['spectral_pars']['Integral']['value']

    prefactor = f * (1. - index)
    prefactor /= (emax ** (1. - index) - emin ** (1. - index))
    prefactor *= scale ** -index

    return prefactor
7cc1a57ac8763e08968ea32a93c867311c242785
72,551
def _compare_cat_hist(b1, b2, h1, h2):
    """
    Compare two categorical histograms and return an overlap score based on RMSE

    b1    bin edges of hist 1
    b2    bin edges of hist 2
    h1    histogram values of hist 1
    h2    histogram values of hist 2

    Return rmse-based overlap score
    """
    cbe = list(set(b1) | set(b2))
    total = len(cbe)
    rmse = 0.0
    if sum(h1) == 0 or sum(h2) == 0:
        return 0.0
    for index in range(total):
        # A category may be missing from one histogram, in which case its
        # count is treated as zero.
        try:
            sh1 = float(h1[b1.index(cbe[index])])
        except Exception:
            sh1 = 0.0
        try:
            sh2 = float(h2[b2.index(cbe[index])])
        except Exception:
            sh2 = 0.0
        sh1 = sh1 / sum(h1)
        sh2 = sh2 / sum(h2)
        rmse += ((sh1 - sh2) ** 2)
    rmse = rmse ** 0.5
    print("Cat: rmse score: {}".format(rmse))
    return rmse
7d819a06f2bb9c6b24f001bbcda88824e896a763
72,553
def convert_8_8_to_float(val_1, val_2):
    """8.8 fixed-point to float converter"""
    return val_1 + (val_2 / 256)
7e4fee85e497baa207dcf7b532e2c3a1e7d29367
72,565
import math


def _conv_out_size(input_size, filter_size, stride, padding="VALID"):
    """
    Computes the output size for a given input size after applying a 1D
    convolution with stride.

    Args:
        input_size : integer input size.
        filter_size : size of 1D filter.
        stride : step size between filter applications.
        padding : type of zero-padding used in the convolution.

    Returns:
        Integer output size.
    """
    if padding == "VALID":
        sizes = (input_size - filter_size + 1)
    elif padding == "SAME":
        sizes = input_size
    else:
        raise ValueError("Invalid padding: {}".format(padding))
    return int(math.ceil(sizes / stride))
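For instance, a length-10 input with a width-3 filter and stride 2:

assert _conv_out_size(10, 3, 2, padding="VALID") == 4  # ceil((10 - 3 + 1) / 2)
assert _conv_out_size(10, 3, 2, padding="SAME") == 5   # ceil(10 / 2)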
63bd6c226514bb5c0437fb04174f7ff80cb33757
72,566
def error_message(e, message=None, cause=None):
    """
    Formats exception message + cause

    :param e:
    :param message:
    :param cause:
    :return: formatted message, includes cause if any is set
    """
    if message is None and cause is None:
        return None
    elif message is None:
        return '%s, caused by %r' % (e.__class__, cause)
    elif cause is None:
        return message
    else:
        return '%s, caused by %r' % (message, cause)
8017f426f8d6b94bbed729e91b48aad8f36e9fad
72,569
def can_candidate_access(application, email):
    """
    Can candidate access Application

    Match on the authorized email field on Application model. Do not match
    on the candidate email field on the Questionnaire model.

    Parameters
    ----------
    application : Application
        Application that is potentially being accessed
    email : str
        Candidate email address (authorized_email)

    Returns
    -------
    bool
        Returns true for access granted
    """
    authorized_email = application.authorized_email
    if authorized_email is not None and (
        authorized_email.lower() == email.lower()
    ):
        can_access = True
    else:
        can_access = False
    return can_access
9448dc65cd314d803f0c27933b04e1f4903c8fa6
72,570
def extension(path: str) -> str:
    """Return the extension of the given path"""
    idx = path.rfind('.')
    if idx == -1:
        return ''
    if idx == 0:
        return ''
    return path[idx + 1:]
aacf2b8976094de60c67861ce497f35dcd25adee
72,571
import re


def _clean_sent(sent):
    """
    This preprocessing function takes in a sentence object, grabs just the
    actual sentence, and runs three regex substitutions to clean up the
    sentence for querying.
    """
    s = re.sub(r'\s([\.,!?\)\]])', r'\1', sent.get_sentence().lower())
    s = re.sub(r'([\(\[])\s', r'\1', s)
    # The fourth positional argument of re.sub is `count`, not `flags`,
    # so the flag must be passed by keyword.
    s = re.sub(r'(\s’[\ss])', r'’', s, flags=re.UNICODE)
    return s
95adc7b79a1663e8e00490077d65a1945ce36737
72,572
def gene_name_pull(feature):
    """Attempt to grab gene name; if gene name not found, grab locus_tag instead."""
    try:
        if len(feature.qualifiers['gene']) != 1:
            print(feature.qualifiers['gene'])
            assert False
        return feature.qualifiers['gene'][0]
    except KeyError:
        if len(feature.qualifiers['locus_tag']) != 1:
            print(feature.qualifiers['locus_tag'])
            assert False
        return feature.qualifiers['locus_tag'][0]
d88d829f02a29fa17a9a94e6ccda6e9091521210
72,574
from typing import List


def _unique(collection: List[str]) -> bool:
    """
    Determine if all elements of a collection are unique.

    :param collection: The collection
    :type collection: Iterable
    :return: `True` if all elements are unique, `False` otherwise
    :rtype: bool
    """
    return len(set(collection)) == len(collection)
fe727e13852ea9baf7696a3eda7e5f2ab57d4b5d
72,575
def check_keyword(line, keyword):
    """
    Check if the line starts with the keyword.
    Replaces - with _ for backwards compatibility.
    """
    new_line = line.replace("-", "_")
    return new_line.startswith(keyword)
2e9f1902db76527e2ecb3e7c6ce43e37ac889e04
72,578
def char_tokenizer(string):
    """
    Splits a string into individual character symbols.

    Args:
        string (string): The string to be split into characters.

    Returns:
        list: The characters of the string.
    """
    return list(string)
2694f670bf862321e42768d63970f04b76782f07
72,580
def bisect(a, x, lo=0, hi=None, cmp=lambda e1, e2: e1 < e2):
    """
    a simplified and generalized version of python's bisect package:
    https://docs.python.org/3.6/library/bisect.html

    return the index where to insert item x in a list a;
    a must be sorted (in ascending order)

    the return value i is such that:
        1. all e in a[:i] have: cmp(e, x)
        2. all e in a[i:] have: not cmp(e, x)

    if cmp is set to <=, the function goes for the rightmost position;
    if cmp is set to <, the function goes for the leftmost position.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        if cmp(a[mid], x):
            lo = mid + 1
        else:
            hi = mid
    return lo
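The cmp parameter switches between the stdlib's bisect_left and bisect_right behaviour:

a = [1, 2, 2, 3]
assert bisect(a, 2) == 1                                # leftmost, like bisect_left
assert bisect(a, 2, cmp=lambda e1, e2: e1 <= e2) == 3   # rightmost, like bisect_right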
8fcf5c82f136b93c956069c17fbe338300ddc176
72,582
import math


def pick(n, k):
    """
    Return the number of ways to select k objects from n objects with order

    :param n: int
    :param k: int
    :return: int

    >>> pick(1, 0)
    1
    >>> pick(1, 2)
    0
    >>> pick(5, 2)
    20
    """
    return math.factorial(n) // math.factorial(n - k) if n >= k else 0
a0a07a2b8a0194dba994ff19849e381696620728
72,588
def getDP(directionPointer: int) -> str:
    """
    Finds the correct direction pointer string

    :param directionPointer: Input direction pointer
    :return: direction pointer string
    """
    if directionPointer == 0:
        return 'r'
    if directionPointer == 1:
        return 'd'
    if directionPointer == 2:
        return 'l'
    return 'u'
05d02afdb7c20dfc796abb42563d983a2d45316f
72,594
def spark_points_flat(lst, key):
    """
    Flatten one key's values from a list of dictionaries into a
    comma-separated string.

    :param lst: list of dictionaries
    :param key: key whose values should be flattened
    :return: values joined with ','
    """
    return ','.join(str(dic[key]) for dic in lst)
bdb00610c6a4c0213e6acc2c2f9d02c0e0c3f666
72,596
import ast


def checker(file_data: str, path: bool = True) -> bool:
    """Given a python file path/content, check whether the given file is
    syntactically correct or not

    Args:
        file_data: A path object or file content
        path: True if file path is passed instead of its content

    Return:
        True if the file/content is syntactically correct
    """
    try:
        if path:
            with open(file_data) as f:
                source = f.read()
        else:
            source = file_data
        ast.parse(source)
        return True
    except SyntaxError:
        return False
    except Exception:
        return False
bfe2edf400b3d34327292112be5f6a94a2b61d0c
72,597
import inspect


def __get_caller_name(caller_frame):
    """Gets the name of the caller (fully qualified, if possible).

    :param caller_frame: A caller's frame.
    :type caller_frame: :class:`frame`
    :returns: The name of the caller (:class:`str`).
    """
    caller_name = caller_frame.f_code.co_name
    if 'self' in caller_frame.f_locals:
        caller_name = "%s.%s" % (
            caller_frame.f_locals['self'].__class__.__name__,
            caller_name
        )
    module = inspect.getmodule(caller_frame)
    if module:
        caller_name = "%s.%s" % (module.__name__, caller_name)
    return caller_name
75998e451ded756afcd10a3bfa4a69df662ed82d
72,598
def is_inside(aabb, point):
    """
    tests if a point is inside an aabb

    aabb: (x,y,w,h)
    point: (x,y)
    """
    x, y, w, h = aabb
    u, v = point
    return u >= x and u < x + w and \
           v >= y and v < y + h
307996f7089fca486d078f6f3f6e3d2e998d312b
72,601
import re


def get_cupy_release_for(chainer_version):
    """Returns CuPy version required for the given Chainer version."""
    m = re.search(r'^v(\d)\.(.+)$', chainer_version)
    if m is None:
        raise ValueError(chainer_version)
    chainer_major = int(m.group(1))
    chainer_rest = m.group(2)

    if chainer_major <= 1:
        raise ValueError('Chainer v1 or earlier is unsupported')
    elif 2 <= chainer_major <= 3:
        # Chainer vN requires CuPy v(N-1).
        return 'v{}.{}'.format((chainer_major - 1), chainer_rest)
    else:
        # The same versioning as Chainer.
        return chainer_version
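For example, per the versioning rule in the comments:

assert get_cupy_release_for('v3.1.0') == 'v2.1.0'  # Chainer v3 -> CuPy v2
assert get_cupy_release_for('v4.0.0') == 'v4.0.0'  # versions track one-to-one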
0a3840c884f831d52d47c675a79ce29bddceb109
72,602