content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def is_subsequence(short_list, long_list):
    """Return True if short_list occurs as a contiguous run inside long_list.

    Note: despite the name, this checks for a *contiguous* sub-list
    (substring semantics), matching the original implementation.

    Fixes: removed an unreachable `len(long_list) - j == 0` branch, and an
    empty short_list is now trivially contained in any list (the original
    returned False for two empty lists).
    """
    n, m = len(short_list), len(long_list)
    if n > m:
        return False
    # Slide a window of length n across long_list and compare element-wise.
    for start in range(m - n + 1):
        if all(short_list[j] == long_list[start + j] for j in range(n)):
            return True
    return False
67a3b2dd9ae326e2710821f074033582f33d6d7d
90,448
def _model_with_preprocess_fn(model, model_preprocess_fn): """Combines the model with its preprocessing function. Args: model: Callable taking (preprocessed_images, is_training, test_local_stats) and returning logits. model_preprocess_fn: Image pre-processing to be combined with `model`. Returns: Callable taking (raw_images, is_training, test_local_stats) and returning logits. """ def model_with_preprocess(images, **batchnorm_kwargs): # Transform inputs from [0, 1] to roughly [-1, +1], which is the range used # by ResNets. (This may entail slightly different transformations for each # channel to reflect the dataset statistics.) # Do so here (instead of during dataset loading) because # adversarial attacks assume that the model inputs are [0, 1]. images = model_preprocess_fn(images) return model(images, **batchnorm_kwargs) return model_with_preprocess
4563865ae1656fe5317955dca8f3affa3753a5b1
90,449
def isacn(obj):
    """isacn(string or int) -> True|False

    Validate an ACN (Australian Company Number).
    http://www.asic.gov.au/asic/asic.nsf/byheadline/Australian+Company+Number+(ACN)+Check+Digit

    Accepts an int, or a string of digits including any leading zeroes.
    Digits may be optionally separated with spaces. Any other input raises
    TypeError or ValueError.

    Return True if the argument is a valid ACN, otherwise False.

    >>> isacn('004 085 616')
    True
    >>> isacn('005 085 616')
    False
    """
    if isinstance(obj, int):
        if not 0 <= obj < 10**9:
            raise ValueError('int out of range for an ACN')
        obj = '%09d' % obj
        assert len(obj) == 9
    if not isinstance(obj, str):
        raise TypeError('expected a str or int but got %s' % type(obj))
    obj = obj.replace(' ', '')
    if len(obj) != 9:
        raise ValueError('ACN must have exactly 9 digits')
    if not obj.isdigit():
        raise ValueError('non-digit found in ACN')
    digits = [int(ch) for ch in obj]
    weights = [8, 7, 6, 5, 4, 3, 2, 1]
    assert len(digits) == 9 and len(weights) == 8
    # Weighted sum over the first eight digits; check digit is the last one.
    weighted_total = sum(w * d for w, d in zip(weights, digits))
    chksum = 10 - weighted_total % 10
    if chksum == 10:
        chksum = 0
    return chksum == digits[-1]
cbabe4a84113cdc02f85922b0012e97c22b99e9e
90,464
def get_err_prob(Q):
    """Return the probability that a base call with Phred quality score Q
    is incorrect (p = 10^(-Q/10))."""
    return 10 ** (-Q / 10)
1c34a3c4d15e683c64d68f81b6f5aa306377c6c4
90,474
def _uniquify_fetches(fetch_mappers): """Uniquifies fetches from a list of fetch_mappers. This is a utility function used by _ListFetchMapper and _DictFetchMapper. It gathers all the unique fetches from a list of mappers and builds a list containing all of them but without duplicates (unique_fetches). It also returns a 2-D list of integers (values_indices) indicating at which index in unique_fetches the fetches of the mappers are located. This list is as follows: values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index Args: fetch_mappers: list of fetch mappers. Returns: A list of fetches. A 2-D list of integers. """ unique_fetches = [] value_indices = [] seen_fetches = {} for m in fetch_mappers: m_value_indices = [] for f in m.unique_fetches(): j = seen_fetches.get(f) if j is None: j = len(seen_fetches) seen_fetches[f] = j unique_fetches.append(f) m_value_indices.append(j) value_indices.append(m_value_indices) return unique_fetches, value_indices
e5cf26c2856d04e51cf9c5c87e0d0047a887290d
90,475
import csv

def get_vendor_ids(tools_csv):
    """Read vendor and tool info from tools.csv.

    Parameters:
        tools_csv: path to the _data/tools.csv in the fmi-standard.org
            repository

    Returns:
        a dictionary {vendor_id: [(tool_id, tool_name), ...]} mapping each
        vendor to the list of its tools.

    Fix: the original docstring claimed a single (tool_id, tool_name) tuple
    per vendor, but the code accumulates a list — the docs now match.
    """
    vendors = {}
    with open(tools_csv, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='"')
        next(reader)  # skip the header
        for row in reader:
            tool_name, tool_id, vendor_id = row[:3]
            # setdefault removes the first-vs-subsequent branch.
            vendors.setdefault(vendor_id, []).append((tool_id, tool_name))
    return vendors
140401b65b03d0605ad484946f3224c94d18ce4e
90,476
def file2uri(path):
    """Return the web file URI for the given file path.

    NOTE(review): for an absolute POSIX path ('/x') this yields
    'file:////x'; presumably intended for Windows-style paths — confirm.
    """
    forward_slashed = path.replace('\\', '/')
    return 'file:///' + forward_slashed
69dee786fd76be669087aeb457de62c4b555f8ee
90,478
import torch

def get_batch(data_iterator, args):
    """Pull one batch from the iterator and move it to the current GPU.

    input schema:
        data_iterator: iterator implementing the torch.utils.data.DataLoader
            interface, yielding (feat, adj, labels, nodes, traintype)
        args: user defined arguments dictionary (unused here)
    output schema:
        tuple (feat, adj, labels, nodes, traintype[0]) with tensors moved
        to the current CUDA device
    """
    device = torch.cuda.current_device()
    feat, adj, labels, nodes, traintype = next(data_iterator)
    # feat/adj/labels arrive wrapped in an extra leading dimension by the
    # loader, hence the [0] indexing — TODO confirm against the collate fn.
    return (
        feat[0].to(device),
        adj[0].to(device),
        labels[0].to(device),
        nodes.to(device),
        traintype[0],
    )
7483d3f4b39d205fbd05cf11310aa2f5d9ae979c
90,479
def install_variables(installer):
    """
    Returns a dictionary of variables for use later in the process (e.g.,
    filling a configuration file). These are combined from all sysconfig
    files. This default implementation contributes nothing.
    """
    return dict()
bd7c926578fea6512929be6f9663dc9a36358f6f
90,482
import math

def point_az(x1, x2, y1, y2):
    """
    Azimuth between two points in cartesian coordinates, in degrees
    normalised to [0, 360).

    Parameters
    ----------
    x1, x2 : float
        x of point 1 and point 2
    y1, y2 : float
        y of point 1 and point 2

    Returns
    -------
    float
        Azimuth between points (in degrees)
    """
    raw_degrees = math.degrees(math.atan2(x2 - x1, y2 - y1))
    return (raw_degrees + 360) % 360
6c59def409e8b680ff885201950f596c55e1c31d
90,483
import re def _validifyName_(name): """ Inner method used to clean anime search query :param name: Anime search query :return: Replaces special characters with '-' to comply with Gogoanime's URL specifications """ # Replace all special characters and get a normalized name where only - is between words, and words in in lowercase newName = re.sub(r' +|:+|#+|%+|\?+|\^+|\$+|\(+|\)+|_+|&+|\*+ |\[+ |]+|\\+|{+|}+|\|+|/+|<+|>+|\.+|\'+', "-", name.lower()) newName = re.sub(r'-+', "-", newName) return newName
1243bc44515f1548d61a031c41384a26a374ae8c
90,485
def _genauth(sinfo): """ Return authentication parameters for stream, if present. """ user = sinfo['aaf_username'] if 'aaf_username' in sinfo else None password = sinfo['aaf_password'] if 'aaf_password' in sinfo else None if user and password: return { 'auth': (user, password) } else: return {}
d71258e15d57333feab1e6d8b597dfcb49735ab8
90,487
import itertools def _get_common_blocks(dp_block_sizes, dp_ids): """Return all pairs of non-empty blocks across dataproviders. :returns dict mapping block identifier to a list of all combinations of data provider pairs containing this block. block_id -> List(pairs of dp ids) e.g. {'1': [(26, 27), (26, 28), (27, 28)]} """ blocks = {} for dp1, dp2 in itertools.combinations(dp_ids, 2): # Get the intersection of blocks between these two dataproviders common_block_ids = set(dp_block_sizes[dp1]).intersection(set(dp_block_sizes[dp2])) for block_id in common_block_ids: blocks.setdefault(block_id, []).append((dp1, dp2)) return blocks
027ac3625e21ab70de8bd656ed2e625236c12fbb
90,488
def candidate_board_uncertain_cells_heuristic(node):
    """Heuristic for a GameTreeNode: the number of uncertain cells on its
    board."""
    uncertain = node.board.getUncertainCells()
    return len(uncertain)
05635af6f904796900ff5a915bab101bb9268fd5
90,489
def overrides(interface_class):
    """Decorator that ensures overridden methods exist on the super class.

    Args:
        interface_class: the respective super class

    Example::

        class ConcreteImplementer(MySuperInterface):
            @overrides(MySuperInterface)
            def my_method(self):
                print('hello kitty!')
    """
    def overrider(method):
        # dir() membership covers inherited attributes as well.
        assert method.__name__ in dir(interface_class)
        return method
    return overrider
a7f53b50ce8b1741ff5ce1cc3dd021c9c5fb3a0a
90,490
import importlib

def import_or_none(library):
    """Attempt to import the requested library.

    Args:
        library (str): the name of the library

    Returns:
        the imported module if it is installed, else None
    """
    try:
        module = importlib.import_module(library)
    except ImportError:
        return None
    return module
454d8fd71888c98d80f898ff426d86392446b499
90,492
def multivalued_cell_join(params):
    """Join lists into single strings with a separator.

    Expects a ``dict`` as loaded from OpenRefine JSON script.

    Args:
        parameters['description'] (str): Human-readable description
        parameters['columnName'] (str): Column to edit
        parameters['separator'] (str): String with which to join values
    """
    target = params['columnName']
    separator = params['separator']

    def exec_multivalued_cell_join(data):
        # Bound method `separator.join` replaces the lambda wrapper.
        joined = data[target].apply(separator.join)
        return data.assign(**{target: joined})

    return exec_multivalued_cell_join
89408a3edd521a3ad73565184ebd90b07653d956
90,494
def speaker_twitters(talk):
    """ Return a list of the speakers' twitter handles of the talk."""
    handles = []
    for speaker in talk.get_all_speakers():
        profile = speaker.user.attendeeprofile.p3_profile
        handles.append(u'@{}'.format(profile.twitter))
    return handles
51af75caa5c82953ac8d5a7a9e1f723f646efa52
90,499
def parse_time(time):
    """Given the time string, parse and turn into normalised minute count.

    Accepts plain numbers (returned unchanged) and strings such as '1:30',
    '90', '90m', '1h', '1h30m'.

    Fix: `type(x) in [int, float]` replaced with the isinstance idiom.
    """
    # Numbers are already minute counts.
    if isinstance(time, (int, float)):
        return time
    if time.endswith('m'):
        time = time[:-1]
    # Treat 'h' as the hour/minute separator, e.g. '1h30' -> '1:30'.
    if 'h' in time:
        time = time.replace('h', ':')
    if ':' in time:
        hours, minutes = time.split(':')
        # '1h' becomes '1:' — an empty minutes field means zero.
        if minutes.strip() == "":
            minutes = 0
    else:
        hours = 0
        minutes = int(time.strip())
    return int(minutes) + int(hours) * 60
b4effe0eb9b93a761e9f6f83f7d7d9b60bb8d978
90,502
import json

def load_json(filepath: str):
    """
    Load JSON file

    Parameters
    ---
    filepath (str)
        File path for JSON

    Returns the parsed content, or None (after printing a warning) when
    the file does not exist.
    """
    try:
        with open(filepath) as handle:
            raw_text = handle.read()
    except FileNotFoundError:
        print("Warning: File %s was not found..." % filepath)
        return None
    return json.loads(raw_text)
5c8d75c68bf9da747f01231d68fa4541a9c353ab
90,511
import torch

def get_device(gpu = -1):
    """Get a device, among those available, on which to compute.

    Args:
      gpu (int): The gpu to use. -1 selects the last GPU found when GPUs
        are present; -2 overrides any found GPU and uses the (first) CPU.
        Default: -1.

    Returns:
      torch.device: pass to torch tensors and modules via their `to`
      method.
    """
    if gpu <= -2:
        return torch.device('cpu:0')
    if not torch.cuda.is_available():
        return torch.device('cpu:0')
    count = torch.cuda.device_count()
    # Negative gpu indices wrap around, so -1 picks the last device.
    return torch.device("cuda:{0}".format((count + gpu) % count))
02a583b8e8a7d4795e8855e8322370e992389a0e
90,518
from math import sqrt

def factors(n):
    """
    Finds all factors for a given number.
    Adapted from https://stackoverflow.com/a/19578818

    :param n: some integer
    :return: set with all factors of n
    """
    # An odd n cannot have even factors, so stride 2 over odd candidates.
    step = 2 if n % 2 else 1
    found = set()
    for candidate in range(1, int(sqrt(n)) + 1, step):
        if n % candidate == 0:
            found.add(candidate)
            found.add(n // candidate)
    return found
2107f9e51ed20edcd7051fe99dd802a783aef4cf
90,519
import types

def namespace_wrapper(map):
    """
    Convert a dict to a SimpleNamespace. If you feed in {'key':'val'},
    you'll get out an object such that o.key is 'val'. (It's legal to feed
    in dict keys like 'x.y-z', but the result will have to be read using
    getattr().)
    """
    # NOTE(review): the parameter shadows the builtin `map`; kept for
    # backward compatibility with keyword callers.
    namespace = types.SimpleNamespace(**map)
    return namespace
cc5c5c2df3000b00cda416da788071b7c4579a3d
90,520
def compute_readout(params):
    """
    Computes readout time from epi params (see `eddy documentation
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/EDDY/Faq#How_do_I_know_what_to_put_into_my_--acqp_file.3F>`_).

    .. warning:: ``params['echospacing']`` should be in *sec* units.

    Missing or non-comparable 'epi_factor'/'acc_factor' entries are ignored
    (best-effort, as before).

    Fix: the bare ``except:`` clauses could hide unrelated errors (even
    KeyboardInterrupt); only KeyError/TypeError are swallowed now.
    """
    epi_factor = 1.0
    acc_factor = 1.0
    try:
        if params['epi_factor'] > 1:
            epi_factor = float(params['epi_factor'] - 1)
    except (KeyError, TypeError):
        pass
    try:
        if params['acc_factor'] > 1:
            acc_factor = 1.0 / params['acc_factor']
    except (KeyError, TypeError):
        pass
    return acc_factor * epi_factor * params['echospacing']
d293ae09803386c34033e7f4d19d6c306f8eb068
90,521
from typing import Callable def _apply_to_inputs(func_to_apply: Callable, *dec_args, **dec_kwargs) -> Callable: """ Decorator function to apply a function to all inputs of a function. Args: func_to_apply: the function to apply to the inputs *dec_args: positional arguments for the function to be applied **dec_kwargs: keyword arguments for the function to be applied Return: the decorated function """ def decorator_fn(func_to_decorate): # actual function applying the give function to inputs def new_func(*args, **kwargs): args = func_to_apply(args, *dec_args, **dec_kwargs) kwargs = func_to_apply(kwargs, *dec_args, **dec_kwargs) return func_to_decorate(*args, **kwargs) return new_func return decorator_fn
3b696ab41d67d2371581b4a1a6ebdddbff5d5eaa
90,528
def get_col_names(name):
    """
    We store results for each simulation in indexed files. Every simulation
    will indicate its results with a path and an idx property.
    Returns the pair of column names for the given type of results.
    Eg: "spikes" -> ("spikes_path", "spikes_idx")
    """
    path_column = f'{name}_path'
    idx_column = f'{name}_idx'
    return path_column, idx_column
8ed302987f4ff686d61fc19f408a35a7aeff7547
90,531
def add_previous_traffic(index, incidents):
    """Add traffic levels for the preceding six entries to the incident.

    Stores them most-recent-first under 'previous_traffic' and returns the
    updated incident dict (mutated in place).

    Fix: clamp the window start at 0 — for index < 6 the original
    `index - 6` went negative, so Python's slice semantics silently
    produced the wrong (often empty) window.
    """
    start = max(0, index - 6)
    window = incidents[start:index]
    incidents[index]['previous_traffic'] = [
        incident['traffic_level'] for incident in reversed(window)
    ]
    return incidents[index]
772da2b2c76f7162ab365e0f0298d1204e1170e3
90,532
import math

def distance(v1, v2):
    """ Euclidean distance between two vectors given as {'x': ..., 'y': ...}. """
    dx = v1['x'] - v2['x']
    dy = v1['y'] - v2['y']
    return math.sqrt(dx ** 2 + dy ** 2)
5460c5dabe734ed122d531d8bf7d6d843c46aeb2
90,533
import math

def deg2rad(degree):
    """
    :param degree: angle in degree
    :return: angle in radians
    """
    # Kept as (degree * pi) / 180 — same operation order as before, so
    # results are bit-identical.
    radians = degree * math.pi / 180.0
    return radians
2a9214255d3bd9c773a60695a99efc4163f0b252
90,536
def normalized_distribution(distribution):
    """Returns a normalized distribution with the individual values adding
    up to 1. Can also be used to convert counts to a distribution.

    Returns None for None input or when the values sum to zero (nothing to
    normalise).
    """
    if distribution is None:
        return None
    total = sum(distribution)
    if total == 0:
        return None
    return tuple(value / total for value in distribution)
af28b1072b7a0dcd160a684f9440af65b2179cfe
90,537
import torch

def Log(x):
    """
    Numerically stable log(1 + exp(x)) via the log-sum trick:
    log(1 + exp(x)) = log(1 + exp(-|x|)) + max(x, 0).

    Fix: the original allocated its zero constant with ``.cuda()``, which
    crashed on CPU-only machines; ``clamp`` keeps the op on x's own device
    (and works for CUDA tensors too).
    """
    lt = torch.log(1 + torch.exp(-torch.abs(x))) + torch.clamp(x, min=0)
    return lt
4529f8904574ad216390fd6a2765f2f1557457ce
90,539
def get_queue_function_name(queue_name):
    """
    Returns the function name of a queue which is not the queue name for
    namespacing and collision reasons.

    Args:
        queue_name: The name of a queue.
    Returns:
        The string representing the function name.
    """
    # '-' is legal in queue names but not in function names.
    sanitized = queue_name.replace('-', '_')
    return "queue___%s" % sanitized
2eeda155eed194a54739f9a7c5de8f3e71a6e28e
90,541
def getIndexDict(idx, labels, shape):
    """
    Get the index tuple of an index in given shape, with the labels.

    Parameters
    ----------
    idx : int
        The index that will be decomposed.
    labels : list of str
        The labels corresponding to each dimension.
    shape : tuple of int
        The shape of given object.

    Returns
    -------
    dict of (str, int)
        The index for each dimension, labelled by the corresponding labels.
    """
    res = {}
    # Peel dimensions off from the innermost (last) axis outward.
    for axis in reversed(range(len(shape))):
        res[labels[axis]] = idx % shape[axis]
        idx //= shape[axis]
    return res
90333ca668d43774375f3359e6b8628af05b93b0
90,542
def samps2time(samps, delta):
    """ convert a number of samples to time given the sampling interval. """
    total_time = samps * delta
    return total_time
4b5dba79c9c5b61c26e27fb59d8fde3c8ce6cae7
90,544
def read_config_file(path, comment='#'):
    """Read the lines from a file, skipping empty and commented lines.

    Don't process comments if 'comment' is falsy.
    """
    kept = []
    with open(path) as file:
        for raw in file:
            line = raw.rstrip()
            if not line:
                continue
            if comment and line.startswith(comment):
                continue
            kept.append(line)
    return kept
424794ec95f63a484836ffcbeec03d2183c702cf
90,547
def _mobius_to_interval(M): """Convert a Mobius transform to an open interval.""" a, b, c, d = M s, t = a/c, b/d return (s, t) if s <= t else (t, s)
60933f5e301077f2307f2a2a00284d078e31d802
90,552
def clan_url(clan_tag):
    """Return clan URL on CR-API."""
    template = 'http://cr-api.com/clan/{}'
    return template.format(clan_tag)
f2c05b8ecf71771e259874bd78d862356dc2b902
90,554
import re

def format_pars_listing(pars):
    """Returns a formated list of pars.

    Args:
        pars (list): A list of PAR objects.

    Returns:
        The formated list as string

    Fixes: the manual row counter is replaced with enumerate, and the
    duplicated truncate-and-strip logic is factored into a helper.
    """
    def _shorten(text):
        # Cap at 30 chars (28 + '..') and strip linebreaks so the fixed
        # column widths stay aligned.
        clipped = text[:28] + '..' if len(text) > 30 else text
        return re.sub(r'[\n\r]', ' ', clipped)

    out = ""
    for i, p in enumerate(pars, start=1):
        name = _shorten(p.name)
        obj_name = _shorten(p.object_name)
        time_cr = f"{p.time_created:%Y-%m-%d %H:%M}" \
            if p.time_created is not None else ""
        time_ex = f"{p.time_expires:%Y-%m-%d %H:%M}" \
            if p.time_expires is not None else ""
        out += (f"{i:>4} {name:30} {obj_name:30} {time_cr:16} {time_ex:16}\n")
    return out
bce22fbe677389a68d13234e99401fa167fb73cc
90,557
def _is_in_group(user, group_names): """ Returns True if a given user is a member of one or more groups. :param user: The User object :type user: django.User :param group_names: A list of group names :type group_names: list[str] :return: Returns True if the user is in at least one of the named groups :rtype: bool """ return user.groups.filter(name__in=group_names).exists()
1cfd7be46b04b9692785227e9893a3e1ce95e33f
90,565
import random

def mutate_guess_i(old_guess, i):
    """Replace position i of the guess string with a random digit 0-9."""
    chars = list(old_guess)
    chars[i] = str(random.randint(0, 9))
    return ''.join(chars)
9116f87d9f9a85017fafffe55a5ae640cce13eb7
90,567
def _endpoint_from_image_ref(image_href): """Return the image_ref and guessed endpoint from an image url. :param image_href: href of an image :returns: a tuple of the form (image_id, endpoint_url) """ parts = image_href.split('/') image_id = parts[-1] # the endpoint is everything in the url except the last 3 bits # which are version, 'images', and image_id endpoint = '/'.join(parts[:-3]) return (image_id, endpoint)
626484976a35b89ecaf5b363503ac993ee231613
90,570
def _t(s): """A dummy translator that can be used for message extraction without actually doing the translation.""" return s
a2161e2e296e121f663d4588ad5bbdbd1adb6df4
90,571
def downsample(data, factor, reduce):
    """Downsample data in blocks of size `factor`.

    Blocks are reduced with `reduce` (called as reduce(blocks, axis=1)).
    If there are insufficient points to fill a final block, that tail is
    discarded.

    Parameters
    ----------
    data : array_like
        Input data (must expose .size and .reshape, e.g. a numpy array)
    factor : int
        Factor to downsample by
    reduce : callable
        Function to use for reducing the data

    Fix: round down with integer arithmetic instead of
    `int(math.floor(size / n)) * n`, which loses precision once size
    exceeds 2**53.
    """
    usable = (data.size // factor) * factor
    return reduce(data[:usable].reshape(-1, factor), axis=1)
91df0c778e5296b334e5e66f816d42276a3a983d
90,573
def merge_unique_lists(a, b):
    """
    Merges two lists which are actually sets, returning a sorted result
    list.
    """
    combined = set(a).union(b)
    return sorted(combined)
7249b237198e88534a56a00a79dabd8bbe849afd
90,575
def _literal_distance(fuzzy_clause, fuzzy_value, cf_value): """Distance between two fuzzy sets of a fuzzy variable""" skip = abs(list(fuzzy_clause).index(fuzzy_value) - list(fuzzy_clause).index(cf_value)) distance = skip / (len(fuzzy_clause) - 1) return distance
35d0fe84fa029c11013b082eca9331ab192c5cb0
90,577
def collision_rect_point(rect, p):
    """True if point p is strictly inside rectangle rect (boundary excluded)."""
    x, y = p
    return (rect.x_min < x < rect.x_max) and (rect.y_min < y < rect.y_max)
498bc68583bf304201998a35cd2cc6628664b89d
90,579
def in_namespace(uri, base_uri):
    """
    Check if given URI is in the "namespace" of the given base URI
    """
    # A trailing '#' or '/' is just the namespace delimiter; drop it so
    # the prefix test works either way.
    if base_uri.endswith(("#", "/")):
        base_uri = base_uri[:-1]
    return uri.startswith(base_uri)
6f6250ac0c7b8b7ffdb22601ed166ea5d12cf175
90,582
from datetime import datetime

def utc_to_local(utc_dt: datetime) -> datetime:
    """Convert input UTC timestamp to local timezone.

    Args:
        utc_dt: Input UTC timestamp (timezone-aware).

    Returns:
        A ``datetime`` with the local timezone.
    """
    # astimezone() with no argument targets the system local timezone,
    # making the third-party dateutil dependency unnecessary.
    return utc_dt.astimezone()
f4bcf9c77cbca63ec1bc7b7f096456ba6d588777
90,583
import inspect def _is_command(obj, cli): """Is this a valid command function? Args: obj (object): candidate cli (module): module to which function should belong Returns: bool: True if obj is a valid command """ if not inspect.isfunction(obj) or obj.__name__.startswith('_'): return False return hasattr(obj, '__module__') and obj.__module__ == cli.__name__;
202d417eec794ad2b9cc3f04f54187e4e7ff0f75
90,584
def get_all_vertices_of_edges_connected_to_vertex(mesh, vertex_u):
    """
    Get all the vertices of edges which are connected to vertex u,
    excluding u itself. The second output is the edge ids themselves.
    """
    e_u = mesh.ve[vertex_u].copy()  # ids of edges incident to vertex_u
    opposite_vertices = []
    # For each incident edge take its endpoint that is not vertex_u.
    for edge_id in e_u:
        first, second = mesh.edges[edge_id]
        opposite_vertices.append(second if first == vertex_u else first)
    return opposite_vertices, e_u
3ebe5ab2e9fbd7b28f2747c7d32ceffcca38d48c
90,590
def split_course_key(key):
    """Split an OpenEdX course key by organization, course and course run codes.

    We first try splitting the key as a version 1 key (course-v1:org+course+run)
    and fallback the old version (org/course/run).
    """
    prefix = "course-v1:"
    if key.startswith(prefix):
        parts = key[len(prefix):].split("+")
    else:
        parts = key.split("/")
    organization, course, run = parts
    return organization, course, run
e22135600d332f3bf183fe234e3383abf82ac466
90,594
def valid_tile_size(value, arg_name, min_power=4, logger=None):
    """
    Verifies that the tile size is defined as an integer in
    [16, 32, 64, 128, 256, 512, 1024, 2048, 4096].

    :param value: int, tile size
    :param arg_name: str, parameter name
    :param min_power: int, 2^min_power as minimum tile size (consistency
        with the number of downsampling layers)
    :param logger: logger instance
    :return: boolean, True if the tile size is correctly defined, False
        otherwise
    """
    allowed = [2 ** i for i in range(min_power, 12)]

    def report(message):
        # Route through the logger when given, otherwise plain print.
        if logger:
            logger.error(message)
        else:
            print(f"ERROR: {message}")

    error = False
    if not isinstance(value, int):
        report(f"Invalid value for the argument {arg_name}: {value}. Enter an integer.\n")
        error = True
    if value not in allowed:
        report(f"Invalid value for the argument {arg_name}: {value}. Choose among {allowed}.\n")
        error = True
    return not error
b3d00946d6ff21df370e42c58e0405a9fd55dd10
90,596
def default_value(feature):
    """Decides a default value for the given feature, to replace missing or
    null values.

    Parameters
    ----------
    feature (str): Name of feature.

    Returns
    ----------
    (int or bool): Default value for given feature.

    Raises
    ------
    ValueError: when no default is defined for the feature.
    """
    # Check order matters: e.g. 'ds*_num' must hit the '_num' rule before
    # the 'ds' prefix rule.
    if feature == 'rqst_timespan':
        return 36500
    if feature == 'rqst_area_rect':
        return 129600
    if feature.endswith('_num'):
        return 0
    if feature == 'converted':
        return False
    if feature.startswith('ds'):
        return False
    if feature in ('PP', 'BR'):
        return False
    if feature in ('SP',):
        return True
    raise ValueError(f"Feature {feature} has no default value set.")
acdd3b15d01507fb592da8e6dd4d9ae942f7b91e
90,597
from functools import wraps

def decorator_without_argument(func_to_decorate):
    """
    Function decorator without argument
    :param func_to_decorate: function to decorate
    :return: function decorated

    Fix: added functools.wraps so the wrapper preserves the wrappee's
    name/docstring.
    """
    @wraps(func_to_decorate)
    def inner_function(*original_args, **original_kwargs):
        print("Enter decorator")
        # Something before
        response = func_to_decorate(*original_args, **original_kwargs)
        # Something after
        return response
    return inner_function
b785347878d4771d7c9581e5e8d8a5fabbdb9c5b
90,608
def size_of_ins(statement):
    """Returns the size (in words) of the instruction.

    Note: the LC-3 is word-addressable, not byte-addressable, so a two-byte
    instruction has a size of 1, not 2. (There is a variant called the
    LC-3b which is byte-addressable.)

    Returns None for statement types with no size (e.g. labels).
    """
    kind = statement.statement_type
    if kind == 'INSTRUCTION':
        # All LC-3 instructions are 2 bytes (size of 1).
        return 1
    if kind == 'DIRECTIVE':
        directive = statement.directive_type
        if directive == 'FILL':
            return 1
        if directive == 'BLKW':
            return statement.size
        if directive == 'STRINGZ':
            # One word per two bytes, including the NUL terminator.
            # NOTE(review): this floors odd byte counts — confirm intended.
            byte_count = len(statement.value) + 1
            return int(byte_count / 2)
    return None
eefb0a95a76ff4c48b85f799696a497d03362020
90,609
def get_oncall_email(client, service: dict) -> str:
    """Fetches the oncall's email for a given service.

    Walks the service's escalation policy to its first rule's first target
    (assumed to be a schedule), then resolves the current oncall user.

    Raises:
        Exception: when no oncalls exist for the escalation policy.

    Fixes: guard clause instead of `if len(oncalls):`, and the already
    materialised list is no longer re-wrapped with `list(...)`.
    """
    escalation_policy_id = service["escalation_policy"]["id"]
    escalation_policy = client.rget(f"/escalation_policies/{escalation_policy_id}")
    schedule_id = escalation_policy["escalation_rules"][0]["targets"][0]["id"]
    oncalls = list(
        client.iter_all(
            "oncalls",  # method
            {
                # "include[]": "users" deliberately omitted — it does not
                # return the contact details we need.
                "schedule_ids[]": [schedule_id],
                "escalation_policy_ids[]": [escalation_policy_id],
            },  # params
        )
    )
    if not oncalls:
        raise Exception(
            f"No users could be found for this pagerduty escalation policy ({escalation_policy_id}). Is there a schedule associated?"
        )
    user_id = oncalls[0]["user"]["id"]
    user = client.rget(f"/users/{user_id}")
    return user["email"]
7c7d0f8ff6a86662394cd176e816ba2a28facfcf
90,611
def statement_text_padded(statement):
    """
    Return the statement text padded with leading whitespace so that
    coordinates in the ast match up with coordinates in the text.
    """
    row_padding = "\n" * statement._start_row
    col_padding = " " * statement._start_col
    return row_padding + col_padding + statement._text
a8a1f2fa1c3a96e5e10be0a571534d2b4647eaa2
90,613
import torch

def autoencoder_loss(x_output, y_target):
    """
    autoencoder_loss

    Equivalent to torch.nn.BCELoss(reduction='sum') / batch_size: because
    the matrix is very sparse, we first sum over the features and then take
    the mean over the batch.

    WARNING: This is NOT equivalent to torch.nn.BCELoss(reduction='mean'),
    which averages over both features and batches.
    """
    epsilon = 1e-12  # keeps log() away from log(0)
    per_element = (
        y_target * torch.log(x_output + epsilon)
        + (1. - y_target) * torch.log(1. - x_output + epsilon)
    )
    per_sample = -torch.sum(per_element, 1)
    return torch.mean(per_sample, 0)
0c3306fc8f83d42a1f541ec689aad5e41fa2d27f
90,614
import hashlib

def to_hash(string):
    """
    Hash a string, in this case a url.

    :param string: a str containing an url
    :return str: the sha256 hex digest of the string
    """
    # encode() already yields bytes; the original's extra bytes() wrapper
    # was redundant.
    digest = hashlib.sha256(string.encode("utf-8"))
    return digest.hexdigest()
a4ac2929aa7ab218f85a7d0c62204570d2ff2253
90,615
def _flatten(lst): """ Flatten a nested list. """ if not isinstance(lst, (list, tuple)): return [lst] result = [] for item in lst: result.extend(_flatten(item)) return result
2f5b2c95b9f6d23cb58de2d8a91b325983963d6b
90,620
def build_bbox_str_list(bbox):
    """Build bounding box string representation from ``bbox``

    :param bbox: dictionary with keys: ``left``, ``top``, ``right``, ``bottom``
    :type bbox: dict
    :return: bounding box string list [left, top, width, height]
    :rtype: list of str
    """
    # Width/height come from the raw coordinates; only the origin is
    # clamped to be non-negative.
    width = bbox['right'] - bbox['left']
    height = bbox['bottom'] - bbox['top']
    return [
        str(max(0, bbox['left'])),
        str(max(0, bbox['top'])),
        str(width),
        str(height),
    ]
91dc4b97ab20de73b01371eb3ddacd3971948289
90,621
def nop(token):
    """Identity pass-through: a dummy function for tests."""
    return token
a4f7e14d37ba14b1e0a316fe61abad9b1894cb2b
90,624
def parse_partition(partition):
    """Parse one partition tuple consisting of size and mount point.

    Raises ValueError unless the input is exactly 'size:mount'.
    """
    pieces = partition.split(":")
    if len(pieces) != 2:
        raise ValueError("Partitions line parts format is 'size:mount'")
    return pieces
09ef01c56d3002552e29d1812cab1ff798beeb0f
90,626
from datetime import datetime

def get_assignments_due(canvas_obj, course_id, date, include_no_date=False):
    """
    Return the list of assignments in the course due after `date`.

    Args:
        canvas_obj: Canvas API object exposing get_course().
        course_id: id of the course to inspect.
        date (datetime): cut-off; assignments due strictly after it are
            returned.
        include_no_date (bool): when True, return ONLY the assignments
            that have no due date instead.

    Fix: `!= None` / `== None` replaced with identity checks (`is not
    None` / `is None`), the Python idiom.
    """
    course = canvas_obj.get_course(course_id)
    assignments_to_return = []
    for assignment in course.get_assignments():
        # Dated assignments, unless the caller asked for undated ones only.
        if assignment.due_at is not None and not include_no_date:
            # due_at is an ISO timestamp; the date prefix is enough here.
            due_date = datetime.strptime(assignment.due_at[:10], '%Y-%m-%d')
            if due_date > date:
                assignments_to_return.append(assignment)
        # Undated assignments, when those were requested.
        elif assignment.due_at is None and include_no_date:
            assignments_to_return.append(assignment)
    return assignments_to_return
90571bc4784a8580cad64d1065a1bcfe123bba66
90,628
def restrict(d, languages=None):
    """Restrict a dictionary with the labels or aliases to the specified
    languages only.

    Args:
        d: mapping of language code -> label/alias.
        languages: iterable of language codes to keep; defaults to
            ['en', 'de'].

    Fix: the mutable list default argument (a Python anti-pattern) is
    replaced with a None sentinel; behaviour is unchanged.
    """
    # lang = ['en', 'de', 'zh', 'hi', 'es', 'fr', 'ar', 'bn', 'ru', 'pt', 'id']
    if languages is None:
        languages = ['en', 'de']
    return dict((k, v) for (k, v) in d.items() if k in languages)
04338004f062d1eaa9e26eb73c5b959a83cf2dda
90,629
import requests

def get_file_size(url, params, timeout=10):
    """Get file size from a given URL in bytes.

    Args:
        url: str. URL string.
        params: query parameters forwarded to requests.get.
        timeout: int, optional. Timeout in seconds.

    Returns:
        int. File size in bytes (0 on any failure).

    #### Examples

    ```python
    get_file_size(url, {})
    ## 178904
    ```

    Fixes: the `params` argument was silently discarded (a literal `{}`
    was passed instead) and `timeout` was never forwarded; ValueError is
    now caught for a non-numeric Content-Length header.
    """
    try:
        response = requests.get(url, params=params, stream=True, timeout=timeout)
    except requests.exceptions.RequestException as e:
        print(e)
        return 0
    try:
        return int(response.headers["Content-Length"])
    except (KeyError, TypeError, ValueError):
        # Header missing or not numeric.
        return 0
642e733a1385423f0f8a894e8e3bfb96ea15dda4
90,630
def none_provider(_request, _feature_flag_name):
    """Feature-flag provider stub: answers None for any given flag name."""
    return None
4446453832073a6be5fad4dea361fb1ef2c47b92
90,637
def get_max_subarray_sum(nums_array: list) -> int:
    """
    Maximum sum of a contiguous subarray (Kadane's Algorithm).
    Complexity --> O(N)
    :param nums_array: list (must be non-empty)
    :return sum: int
    """
    best = current = nums_array[0]
    for value in nums_array[1:]:
        # Either extend the running window or restart at this element.
        current = max(current + value, value)
        best = max(best, current)
    return best
5ff5e26836c3014c8916057cb4890a70bafb986a
90,638
def to_solution(parameters, columns):
    """Convert a solution given as a list of (column index, value) pairs
    into a list of (column object, value) pairs."""
    lookup = parameters.columns
    return [(lookup[column_id], value) for column_id, value in columns]
e8565a1e956b3dac0e8d2e555253fa9f744e10db
90,640
def primes2booleannet(primes, header=""):
    """Convert a pyboolnet primes dictionary to a BooleanNet string representation.

    Parameters
    ----------
    primes : pyboolnet primes dictionary
        Update rules to convert.
    header : str
        Text prepended to the output, e.g. comment lines.  The legacy Java
        version of StableMotifs requires rules files to begin with the line
        "#BOOLEAN RULES".

    Returns
    -------
    str
        BooleanNet representation of the update rules.
    """
    # Pad every "name*=" prefix to a common width so expressions align.
    width = 3 + max(len(name) for name in primes)

    rule_lines = []
    for name in primes:
        off_primes, on_primes = primes[name][0], primes[name][1]
        if off_primes == [] or on_primes == [{}]:
            # Node is constant ON.
            rhs = '1'
        elif on_primes == [] or off_primes == [{}]:
            # Node is constant OFF.
            rhs = '0'
        else:
            # DNF over the prime implicants of the ON function.
            clauses = []
            for term in on_primes:
                literals = [var if term[var] == 1 else 'not ' + var for var in term]
                clauses.append(' and '.join(literals))
            rhs = ' or '.join(clauses)
        rule_lines.append((name + '*=').ljust(width) + rhs)
    rule_lines.append('')  # yields a trailing newline after the join

    return header + "\n".join(rule_lines)
f2d10599d8386e9ed330129cad753e0ee0f52409
90,644
def boost_npa(group, npa):
    """
    Scale the boost ``npa`` by how many peptides the IAR contains.

    The fraction awarded accumulates over size thresholds:
    >= 10 peptides: +0.4, >= 15: +0.3, >= 20: +0.2, >= 30: +0.1,
    so an IAR with 30 or more peptides receives the full boost.

    :param group: the IAR under review (anything supporting ``len``,
        e.g. a pandas DataFrame of peptides).
    :param npa: the total boost amount allotted to this criterion.
    :returns: the boost contribution, rounded to 2 decimal places.
    :rtype: float
    """
    size = len(group)
    fraction = 0.0
    for threshold, weight in ((10, 0.4), (15, 0.3), (20, 0.2), (30, 0.1)):
        if size >= threshold:
            fraction += weight
    return round(npa * fraction, 2)
3843b027219f810a99a3ce2f8117eddae3f25400
90,645
def train_test_validation(closed_data, train_frac=0.8, random_state=None):
    """
    Split a DataFrame into train/test/validation sets (8:1:1 by default).

    The non-training remainder is split evenly between test and validation.

    :param closed_data: pandas DataFrame to split.
    :param train_frac: fraction of rows for the training set (default 0.8).
    :param random_state: optional seed forwarded to ``DataFrame.sample`` for
        reproducible splits; ``None`` (the default) keeps the original
        non-deterministic behaviour.
    :return: (train, test, validation) DataFrames with disjoint indices.
    """
    train = closed_data.sample(frac=train_frac, random_state=random_state)
    remainder = closed_data.drop(train.index)
    test = remainder.sample(frac=0.5, random_state=random_state)
    validation = remainder.drop(test.index)
    return train, test, validation
6aadd302f5f77b6b9c78e22f9f9b9b42061b145b
90,649
def get_starttime(event):
    """Sort key for Calendar entries: the event's DTSTART datetime."""
    start = event["DTSTART"]
    return start.dt
f4bf84311ea272eaf4697b07cb4f25e7b6e30921
90,654
def screen(row, col, maxwell, **kwargs):
    """Build a screen description with global parameters.

    Parameters
    ----------
    row : string
        Parameter encoded by the row letter.
    col : string
        Parameter encoded by the column number.
    maxwell : string
        Name of the well in the bottom-right corner of each tray, e.g. 'H6';
        must be exactly one letter followed by one digit.
    **kwargs : any type
        Named arguments recorded as global ("static") parameters applied to
        all wells in all trays of the screen.

    Returns
    -------
    screen : dict
        A dictionary containing the screen.

    Raises
    ------
    ValueError
        If ``maxwell`` is not a single letter followed by a single digit.
    """
    # Validate the well name with guard clauses before building anything.
    if len(maxwell) != 2:
        raise ValueError(f'maxwell must have length 2, supplied {maxwell} has length {len(maxwell)}')
    if not maxwell[0].isalpha():
        raise ValueError(f'first character of maxwell must be a letter, supplied {maxwell} has first character {maxwell[0]}')
    if not maxwell[1].isdigit():
        raise ValueError(f'second character of maxwell must be a number, supplied {maxwell} has second character {maxwell[1]}')

    return {
        "row": row,
        "col": col,
        "maxwell": maxwell,
        "statics": dict(kwargs),
    }
4b6c3ff505c76740fe236ec48913695fd220371d
90,655
from pathlib import Path


def true_stem(path: Path) -> str:
    """True stem of a path: the final name with ALL suffixes removed.

    Unlike ``Path.stem`` (which strips only the last suffix),
    ``true_stem(Path('a.tar.gz'))`` returns ``'a'``.  A name with no suffix
    is returned unchanged -- the original implementation sliced with
    ``name[:-0]`` and therefore returned ``''`` in that case.
    """
    suffix_len = len("".join(path.suffixes))
    # name[:-0] would yield '', so guard the suffix-free case explicitly.
    return path.name[:-suffix_len] if suffix_len else path.name
efc6916c81e41b82bdc1da82941f04ba7747f2a2
90,657
def power_solar(solar_irradiation, conductor):
    """Solar heat gain of a conductor (Section 3.3, Eq 8, page 18).

    Product of the conductor's absorptivity (attribute spelled
    ``absortivity`` on the conductor object), the incident solar
    irradiation, and the conductor diameter.
    """
    absorbed = conductor.absortivity * solar_irradiation
    return absorbed * conductor.diameter
66f2c86a865b2221a20ef21d551004230fff40f9
90,658
def add_source_names(source_ids, names):
    """Pair each unique source ID with its name.

    :param source_ids: iterable of source IDs (int-convertible values).
    :param names: mapping from int source ID to name; missing IDs map to None.
    :return: sorted list of ``(source_id, name)`` tuples, duplicates removed.
    """
    unique_ids = set(source_ids)
    pairs = [(source_id, names.get(int(source_id))) for source_id in unique_ids]
    pairs.sort()
    return pairs
edea016623676f498f5cca8ccf7df9ee321590bc
90,662
import pathlib
import json


def load_json_config(json_config_file: pathlib.Path):
    """
    Load configuration info from a JSON file.

    :param json_config_file: the JSON configuration file path.
    :return: the deserialized configuration values.
    :raises json.JSONDecodeError: if the file does not contain valid JSON.
    """
    # JSON is defined as UTF-8; don't rely on the locale's default encoding.
    with open(json_config_file, mode="r", encoding="utf-8") as config:
        return json.load(config)
49c5c90bbbf75098d27154a511aa0cd7fe8c1958
90,664
import torch


def compute_edge_feats_dict(edge_ixs, det_df, fps, use_cuda):
    """
    Computes a dictionary of edge features among pairs of detections.

    Args:
        edge_ixs: Edges tensor with shape (2, num_edges)
        det_df: processed detections dataframe
        fps: fps for the given sequence
        use_cuda: bool, determines whether operations must be performed in GPU

    Returns:
        Dict where each key is a string referring to the attr name, and each
        val is a tensor of shape (num_edges) with vals of that attribute for
        each edge.
    """
    use_gpu = use_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_gpu else "cpu")

    def _column(name):
        # Pull one dataframe column onto the target device as float32.
        return torch.from_numpy(det_df[name].values).float().to(device)

    row, col = edge_ixs

    secs = _column('frame') / fps  # frame indices converted to seconds
    heights = _column('bb_height')
    widths = _column('bb_width')
    feet_x = _column('feet_x')
    feet_y = _column('feet_y')

    # Positional distances are normalized by the pair's mean box height.
    mean_heights = (heights[row] + heights[col]) / 2

    return {
        'secs_time_dists': secs[col] - secs[row],
        'norm_feet_x_dists': (feet_x[col] - feet_x[row]) / mean_heights,
        'norm_feet_y_dists': (feet_y[col] - feet_y[row]) / mean_heights,
        'bb_height_dists': torch.log(heights[col] / heights[row]),
        'bb_width_dists': torch.log(widths[col] / widths[row]),
    }
37d7b4c1ba144260747de0aa677314649bb2906a
90,668
from typing import Dict


def group_production(production: Dict[str, int]) -> Dict[str, int]:
    """Aggregate coal and hydro sub-types into single entries.

    Keys starting with 'uhli' (coal variants, e.g. lignite and hard coal)
    are summed under 'uhli', keys starting with 'voda' (hydro variants,
    e.g. river and reservoir) under 'voda'; everything else is copied
    through unchanged.

    Args:
        production (Dict[str, int]): production per resource type.

    Returns:
        Dict[str, int]: production with coal and hydro variants merged.
    """
    grouped: Dict[str, int] = {}
    for resource, amount in production.items():
        if resource.startswith('uhli'):
            key = 'uhli'
        elif resource.startswith('voda'):
            key = 'voda'
        else:
            grouped[resource] = amount
            continue
        grouped[key] = grouped.get(key, 0) + amount
    return grouped
8605b7bee8d3be1cd2644df447b215a533cfa458
90,671
def color_spot(htmlcolorcode, text=None):
    """
    HTML snippet: a span with class 'colorspot' filled with
    ``htmlcolorcode`` as background color, followed by a label.
    The label defaults to the color code itself when ``text`` is None.
    """
    label = htmlcolorcode if text is None else text
    template = u'<span class="colorspot" style="background-color:%s;">&nbsp;&nbsp;&nbsp;</span>&nbsp;%s'
    return template % (htmlcolorcode, label)
be0368fe5a0fad42a84cdbfa46615e71405af785
90,672
def _normalize_deviation_args(lower, upper, msg): """Normalize deviation acceptance arguments to support both "tolerance" and "lower, upper" signatures. This helper function is intended for internal use. """ if isinstance(upper, str) and msg is None: upper, msg = None, msg # Shift values if using "tolerance" syntax. if upper == None: tolerance = lower if tolerance != abs(tolerance): raise ValueError('tolerance should not be negative, ' 'for full control of lower and upper ' 'bounds, use "lower, upper" syntax') lower, upper = -tolerance, tolerance if lower > upper: raise ValueError('lower must not be greater than upper, got ' '{0} (lower) and {1} (upper)'.format(lower, upper)) return (lower, upper, msg)
3bd35a9bb12ac30745da165cf085502d021a6083
90,678
def quizzable(obj):
    """Report which quiz-protocol attributes ``obj`` defines.

    Checks for '__question__', '__options__', '__check__', '__score__',
    '__passing__' and '__total__' and returns one boolean per attribute,
    in that order.  Note: despite the name, this returns the per-attribute
    list, not a single bool -- use ``all(quizzable(obj))`` for an overall
    verdict.
    """
    required = ('__question__', '__options__', '__check__', '__score__',
                '__passing__', '__total__')
    result = []
    for attr in required:
        result.append(hasattr(obj, attr))
    return result
1e5b70f650aaaba17cc04add3cb850e7fdd87aeb
90,680
def is_pixel_inside(dim, coord):
    """Check if the pixel coordinate ``coord`` is inside the image ``dim``.

    Both arguments must have at least two entries; only the first two are
    examined.  Bounds are inclusive on both ends (0 <= c <= dim[i]).
    """
    # NOTE(review): the inclusive upper bound treats dim as max indices,
    # not image sizes -- confirm against callers.
    if len(dim) < 2 or len(coord) < 2:
        raise Exception("Dimensions should be >= 2! Check!")
    within_x = 0 <= coord[0] <= dim[0]
    within_y = 0 <= coord[1] <= dim[1]
    return within_x and within_y
337de0143a35f06a0e86019901cb9a670b4b074d
90,682
def calc_gamma_ref_via_menq_2003(c_u, p_eff, p_atm=101.0e3):
    """
    Reference strain for a modulus reduction curve from Menq (2003).

    :param c_u: float, uniformity coefficient (grain size ratio d_60 / d_10)
    :param p_eff: effective confining pressure (same units as ``p_atm``)
    :param p_atm: atmospheric pressure (default 101 kPa, in Pa)
    :return: reference strain
    """
    coeff_a = 0.12 * c_u ** -0.6
    exponent_n = 0.5 * c_u ** -0.15
    # Menq's expression yields percent strain; divide by 100 for strain.
    return coeff_a * (p_eff / p_atm) ** exponent_n / 100
5ed98e372cf8ebc81721ea4edbb6323f8512efe2
90,687
def _flag_awakenings(hypno, thresh): """ Mark awakenings as Long or Short depending on threshold in minutes Parameters ---------- hypno : pd.DataFrame Hypnogram dataframe obtained through _read_hypno(). thresh : int or float, positive non-zero Minimum duration in minutes for awakenings to qualify as long. Returns ------- hypno : pd.DataFrame Hypnogram with added columns for type of awakening. """ # Sanity checks assert isinstance(thresh, (int, float)), "Threshold should be a numeric type" assert thresh > 0, "Threshold should be a positive number (minutes)" # Seggregate long and short awakenings based on a 2 minute threshold wake_mask = hypno["Stage"] == "W" hypno.loc[(wake_mask) & (hypno["Duration"] <= thresh), "Awakening"] = "Short" hypno.loc[(wake_mask) & (hypno["Duration"] > thresh), "Awakening"] = "Long" return hypno
a270665b50140117c0651b3bf3d8bbbb18725c00
90,691
def validate_lag(Tplot, chrono):
    """Return a validated plotting-lag triple.

    - T_lag: equal to Tplot (falling back to ``chrono.Tplot`` when None),
      clipped so it never exceeds the chronology's time span ``chrono.tt``.
    - K_lag: the lag expressed in time-step indices.
    - a_lag: the lag expressed in observation indices.
    """
    # Fall back to the chronology's own plotting window.
    window = chrono.Tplot if Tplot is None else Tplot
    assert window >= 0

    # Clip the window to the available time range [tt[0], tt[-1]].
    t_end = chrono.tt[-1]
    t_start = max(chrono.tt[0], t_end - window)
    T_lag = t_end - t_start

    K_lag = int(T_lag / chrono.dt) + 1   # lag in step indices
    a_lag = K_lag // chrono.dkObs + 1    # lag in obs indices
    return T_lag, K_lag, a_lag
f82ddd0015955ea05d769ab304fd9c8ba54efddc
90,696
def to_bool(val):
    """Convert a string representation of true or false to a boolean value.

    Recognized strings (case-insensitive, surrounding whitespace ignored):
    'true'/'1' -> True, 'false'/'0' -> False.  Booleans pass through
    unchanged.  Returns None for None/empty/unrecognized input.
    """
    # Handle real booleans first: the previous falsy guard ran before the
    # isinstance check and turned ``False`` into None.
    if isinstance(val, bool):
        return val
    if not val:
        return None
    bool_states = {'true': True, 'false': False, '0': False, '1': True}
    return bool_states.get(str(val).strip().lower())
5610248e459ad7f736028fa6093d1639c6dd6c23
90,712
def drop_internal_nodes(network):
    """
    Given a network from `build_monodirectional_network`, drop nodes that
    are "internal" to a segment: nodes whose incoming and outgoing edges
    all attach to one and the same segment.  Dead ends (no successors or
    no predecessors) are kept.

    The nodes are removed from the network in place.

    Parameters
    ----------
    network: nx.DiGraph
        A graph created by `build_monodirectional_network`.

    Returns
    -------
    list of nodes that were dropped from the network.
    """
    def _is_internal(obj):
        successors = set(network[obj])
        predecessors = set(network.predecessors(obj))
        # Keep dead ends; drop only nodes whose neighbours collapse to a
        # single physical segment.
        if not successors or not predecessors:
            return False
        return len(successors | predecessors) == 1

    dropped = [obj for obj in network if 'node' in obj and _is_internal(obj)]
    for obj in dropped:
        network.remove_node(obj)
    return dropped
1ccaecb1b90f7a3e412490233320736306d4d290
90,716
def prime_factors(number):
    """
    Finds the prime factors of a number.

    :param number: Integer number to find prime factors for
    :return: list of prime factors for the given number (with multiplicity)
    :rtype: list

    An example

    >>> prime_factors(13195)
    [5, 7, 13, 29]
    """
    candidate, factors = 2, []
    while candidate * candidate <= number:
        while number % candidate == 0:  # repeats multiple factors
            factors.append(candidate)
            # Integer division: the previous `number /= c` turned `number`
            # into a float, so the final appended factor came out as 29.0.
            number //= candidate
        candidate += 1
    if number > 1:
        factors.append(number)
    return factors
100c1efa28e1a64777ad0e6abd98501896b54433
90,724
def divide(value, arg):
    """
    Divide ``value`` by ``arg``.

    Returns None whenever ``value`` is falsy (including 0 and None).
    """
    # NOTE(review): a zero numerator yields None rather than 0 -- the guard
    # looks aimed at None input; confirm whether callers rely on this.
    if not value:
        return None
    return value / arg
fc65871afd03facf949a260782bbf4b376ea77ff
90,729
from typing import List from typing import Tuple def _calculate_gamma_and_epsilon(zero_counts: List[int], one_counts: List[int]) -> Tuple[int, int]: """Calculates the gamma and epsilon rate based on the one and zero counts the returns them. Args: zero_counts (List[int]): the number of 0 digits across all diagnostic data for each digit position one_counts (List[int]): the number of 1 digits across all diagnostic data for each digit position Returns Tuple[int,int]: (gamma, epsilon) """ binary_number = '' binary_number_complement = '' for one_count, zero_count in zip(one_counts, zero_counts): binary_number += (str(int(one_count > zero_count))) binary_number_complement += (str(int(one_count < zero_count))) gamma = int(binary_number, base=2) epsilon = int(binary_number_complement, base=2) return gamma, epsilon
a4a1086aac97b1ad59b4e945553a4d5b66426b30
90,730
import re


def clean(filepath: str) -> str:
    """Collapse a subtitle (vtt) file into one cleaned string.

    Strips the WEBVTT header, cue indexes, timestamp lines and blank
    lines, then joins the remaining caption text, normalising whitespace
    and ensuring a space follows punctuation marks.

    Args:
        filepath (str): path to vtt file

    Returns:
        str: clean content
    """
    with open(filepath, "r", encoding="utf-8") as fp:
        raw = fp.read()

    # Drop blank lines, then the leading WEBVTT header.
    lines = [line.strip() for line in raw.split("\n") if line.strip()]
    if lines[0].upper() == "WEBVTT":
        lines = lines[1:]

    # Remove cue indexes (digit-only lines) and timestamp lines.
    timestamp = r"^\d{2}:\d{2}:\d{2}.\d{3}.*\d{2}:\d{2}:\d{2}.\d{3}$"
    kept = [line for line in lines
            if not line.isdigit() and not re.match(timestamp, line)]

    content = " ".join(kept)
    # Collapse whitespace runs to single spaces.
    content = re.sub(r"\s+", r" ", content)
    # Ensure a space after ., ! and ? when directly followed by a word char.
    content = re.sub(r"([\.!?])(\w)", r"\1 \2", content)
    return content
614ec11ff35d4dcc3249bf87273fb363a4d79b32
90,731
import requests


def image_exists_on_dockerhub(image_name: str, image_tag: str) -> bool:
    """
    Check whether ``image_name:image_tag`` is publicly accessible on
    DockerHub, without pulling it: fetch an anonymous pull token from the
    Docker auth service, then ask the registry for the tag's manifest.
    A 200 response means the image exists.

    Based on the code from this blog post:
    * htttps://ops.tips/blog/inspecting-docker-image-without-pull/

    :param image_name: Name of an image, such as ``continuumio/miniconda3``
    :param image_tag: Tag for the image, such as ``4.8.2``

    :Example:

    .. code-block:: python

        # should be True
        image_exists_on_dockerhub("continuumio/miniconda3", "4.8.2")

        # should be False
        import uuid
        image_exists_on_dockerhub(str(uuid.uuid4()), "0.0.1")
    """
    auth_url = (
        "https://auth.docker.io/token?scope=repository:"
        f"{image_name}:pull&service=registry.docker.io"
    )
    token_response = requests.get(url=auth_url)
    token_response.raise_for_status()
    token = token_response.json()["token"]

    manifest_response = requests.get(
        url=f"https://registry-1.docker.io/v2/{image_name}/manifests/{image_tag}",
        headers={
            "Accept": "application/vnd.docker.distribution.manifest.v2+json",
            "Authorization": f"Bearer {token}",
        },
    )
    return manifest_response.status_code == 200
989214d56a2a4635759c1bc6fc821e63a2e28718
90,734
def cut_suffix(name, suffix):
    """Cuts off the *suffix* from *name* string, if it ends with it.

    :param name: original name from which suffix will be cut off;
        non-string values are returned unchanged
    :type name: string
    :param suffix: string to be removed
    :return: string without suffix
    """
    # Guard the empty suffix: every string endswith '' and the slice
    # name[:-0] would wrongly return ''.
    if suffix and isinstance(name, str) and name.endswith(suffix):
        name = name[:-len(suffix)]
    return name
6c55b265a7b31fc97598ecaa6d7aa6e51d13440b
90,736
import re


def valid_org(s):
    """Is this a valid GitHub org?

    Valid means a non-empty string containing no '/' separator.  Always
    returns a bool; the previous version leaked a ``re.Match`` object for
    valid input and relied on its truthiness.
    """
    return isinstance(s, str) and re.match(r"^[^/]+$", s) is not None
fe61ba0eb07df0bf2a19556ac1e4ed8e88121e1c
90,738
def frequency_to_probability(frequency_map, decorator=lambda f: f):
    """Transform a ``frequency_map`` into a map of probability using the sum
    of all frequencies as the total.

    Example:
        >>> frequency_to_probability({'a': 2, 'b': 2})
        {'a': 0.5, 'b': 0.5}

    Args:
        frequency_map (dict): The dictionary to transform
        decorator (function): A function to manipulate the probability

    Returns:
        Dictionary of ngrams to probability
    """
    total = sum(frequency_map.values())
    probabilities = {}
    for ngram, count in frequency_map.items():
        probabilities[ngram] = decorator(count / total)
    return probabilities
dcc07f4e6aebd08c6ff84ff0bec95141eb1aa0a5
90,744
def square_meter2ha(value):
    """Convert an area from square metres to hectares (1 ha = 10 000 m2)."""
    square_meters_per_hectare = 10000
    return value / square_meters_per_hectare
c84e6971b8be7e086ebbb5327f5d962646bc2ae4
90,745
def develop_filename(args, today):
    """Pick the output filename: the user-supplied one, or a date-stamped
    default.

    :param args: parsed arguments exposing an ``outfile`` attribute.
    :param today: date/datetime used to stamp the default name.
    :return: filename string.
    """
    if args.outfile is None:
        # NOTE(review): 'utlization' typo kept as-is -- downstream tooling
        # may already expect this exact filename.
        return 'utlization-summary-{}.csv'.format(today.strftime("%Y-%m-%d"))
    return args.outfile
5469a51b369c4c8151b46057fa1c8ae15a251009
90,748
def load_split_indices(file_path, fold_index=0):
    """
    Read the train/test indices of one cross-validation fold.

    The file alternates lines of the form::

        train 0 1 2 ...
        test 3 4 6 ...
        train 1 5 2 ...
        test 6 8 9 ...

    Fold ``k`` occupies lines ``2k`` (train) and ``2k + 1`` (test).

    Args:
        file_path: path to split indices file
        fold_index: index of the fold whose train and test indices you want

    Returns:
        train_index: list of integer indices for train data
        test_index: list of integer indices for test data
    """
    with open(file_path, 'r') as handle:
        lines = handle.readlines()

    needed = 2 * fold_index + 2
    assert len(lines) >= needed, 'Error: not enough information in fold indices file %d < % d' \
        % (len(lines), needed)

    def _parse(line):
        # Drop the leading 'train'/'test' label, keep the integer indices.
        return [int(token) for token in line.split(' ')[1:]]

    train_index = _parse(lines[2 * fold_index])
    test_index = _parse(lines[2 * fold_index + 1])
    return train_index, test_index
ab2b850ab385ef1b4c2c9e4636a5d4c2d8910521
90,755