Dataset columns: content (string, 35 – 416k chars), sha1 (string, 40 chars), id (int64, 0 – 710k).
def joblib_cache(ds, dirname, version, numpy_keys=None, mmap_mode=None, eager=True):
    """
    Chunks data together, converting values in `numpy_keys` into numpy arrays
    (for more efficient storage), then dechunks them afterwards.

    dirname: directory where to store the cache
    numpy_keys: which keys should be converted into numpy arrays
    """
    if numpy_keys is None:
        numpy_keys = []
    return ds.numpy_chunk(
        keys=numpy_keys,
    ).cache(
        dirname=dirname,
        version=version,
        backend="joblib",
        eager=eager,
        mmap_mode=mmap_mode
    ).dechunk()
b41ddc35d53dd82d5614e4c4aea0db26c25b1e42
29,716
def find_in_reference_list(reference_list, source_name):
    """Check if source_name is already in the reference list."""
    for reference in reference_list:
        if reference['sname'] == source_name:
            return True
    return False
09769d77724ba58d41b54ef0b182e110f606161e
29,717
import numpy


def _subset_xy_grid_for_interp(
        field_matrix, grid_point_x_coords_metres, grid_point_y_coords_metres,
        query_x_coords_metres, query_y_coords_metres):
    """Subsets x-y grid before interpolation.

    Interpolation will be done from the x-y grid to the query points.

    M = number of rows in original grid
    N = number of columns in original grid
    m = number of rows in subset grid
    n = number of columns in subset grid

    :param field_matrix: M-by-N numpy array of gridded values, which will be
        interpolated to query points.
    :param grid_point_x_coords_metres: length-N numpy array of x-coordinates
        at grid points.  Assumed to be sorted in ascending order.
    :param grid_point_y_coords_metres: length-M numpy array of y-coordinates
        at grid points.  Assumed to be sorted in ascending order.
    :param query_x_coords_metres: numpy array (any dimensions) with
        x-coordinates of query points.
    :param query_y_coords_metres: numpy array (equivalent shape to
        `query_x_coords_metres`) with y-coordinates of query points.
    :return: subset_field_matrix: m-by-n numpy array of gridded values.
    :return: subset_gp_x_coords_metres: length-n numpy array of x-coordinates
        at grid points.
    :return: subset_gp_y_coords_metres: length-m numpy array of y-coordinates
        at grid points.
    """

    valid_x_indices = numpy.where(numpy.logical_and(
        grid_point_x_coords_metres >= numpy.min(query_x_coords_metres),
        grid_point_x_coords_metres <= numpy.max(query_x_coords_metres)))[0]

    first_valid_x_index = max([valid_x_indices[0] - 2, 0])
    last_valid_x_index = min([
        valid_x_indices[-1] + 2,
        len(grid_point_x_coords_metres) - 1
    ])

    valid_y_indices = numpy.where(numpy.logical_and(
        grid_point_y_coords_metres >= numpy.min(query_y_coords_metres),
        grid_point_y_coords_metres <= numpy.max(query_y_coords_metres)))[0]

    first_valid_y_index = max([valid_y_indices[0] - 2, 0])
    last_valid_y_index = min([
        valid_y_indices[-1] + 2,
        len(grid_point_y_coords_metres) - 1
    ])

    subset_field_matrix = field_matrix[
        first_valid_y_index:(last_valid_y_index + 1),
        first_valid_x_index:(last_valid_x_index + 1)
    ]

    return (
        subset_field_matrix,
        grid_point_x_coords_metres[
            first_valid_x_index:(last_valid_x_index + 1)
        ],
        grid_point_y_coords_metres[
            first_valid_y_index:(last_valid_y_index + 1)
        ]
    )
112261e863a8afda6c5baa7cc63b7a9e36b64fe8
29,719
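A minimal usage sketch for _subset_xy_grid_for_interp, assuming the function above is in scope; the toy 1-km grid and the query points are hypothetical:

import numpy

grid_x = numpy.linspace(0., 99000., 100)   # length-N x-coordinates, ascending
grid_y = numpy.linspace(0., 99000., 100)   # length-M y-coordinates, ascending
field = numpy.random.rand(100, 100)        # M-by-N gridded field

query_x = numpy.array([40500., 41500.])
query_y = numpy.array([60500., 61500.])

sub_field, sub_x, sub_y = _subset_xy_grid_for_interp(
    field, grid_x, grid_y, query_x, query_y)
print(sub_field.shape)  # (5, 5): window around the queries, padded by 2 grid points per side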
import numpy


def npmedian(x):
    """Numpy library median."""
    return numpy.median(x)
4aa37d3c149cb4fa326aa963ef9813547975b00f
29,720
def canonicalize(data):
    """Restructure to preferred TCZYX or CZYX form..."""
    data = data.transpose(*[d for d in map(data.axes.find, 'TCIZYX') if d >= 0])
    projection = []
    if 'T' in data.axes and data.shape[0] == 1:
        projection.append(0)  # remove trivial T dimension
    if 'C' not in data.axes:
        projection.append(None)  # add trivial C dimension
    elif projection:
        projection.append(slice(None))
    if projection:
        projection += [slice(None) for d in 'ZYX']
        data = data.lazyget(tuple(projection))
    return data
d61b4e87be5e78ba96e0d15536674ed58dff32b4
29,721
def upper(input_string: str) -> str:
    """Convert the complete string to uppercase."""
    return input_string.upper()
9acbac80cf1de15a1b21f4413d75e91e171c6c3f
29,722
import traceback


def tryExcept(myFunction):
    """
    A decorator that surrounds the inner function with a try/except.

    EXAMPLE USE
        @tryExcept
        def fragileFunction():
            pass
    """
    def wrapper(*args, **kwargs):
        try:
            return myFunction(*args, **kwargs)
        except Exception as error:
            traceback.print_exception(type(error), error, error.__traceback__)
    return wrapper
9cd6275f785886dfbc840541c765d6a274bb9143
29,723
def auth_set(hashed_sks, auth_set_indices, height, hashfun):
    """Return the authentication set defined by the given indices.

    Keyword arguments:
    hashed_sks       -- the hashed secret key components which form
                        the leaves of the tree
    auth_set_indices -- a list of tuples (h, i) defining the height and index
                        of the nodes that should end up in the
                        authentication set
    height           -- the height of the binary tree
    hashfun          -- a hash function of 2n -> n that is used to produce
                        the parent node from its two child nodes

    Returns:
    A list containing tuples ((h, i), hash), the height, index, and hash of
    the node in the authentication set. The order of the nodes in the
    returned list is equal to the order of the nodes in auth_set_indices.
    """
    tree = [None] * height
    tree[0] = hashed_sks  # the leaves

    for current_height in range(1, height):
        # We don't need to compute the root,
        # otherwise this would be off by one
        num_nodes = 2**(height - current_height)
        tree[current_height] = [None] * num_nodes
        for index in range(0, num_nodes):
            left = tree[current_height - 1][2*index]
            right = tree[current_height - 1][2*index + 1]
            tree[current_height][index] = hashfun(left, right)

    return [((h, i), tree[h][i]) for h, i in auth_set_indices]
0f4565d4aa399ab534a962b6f3ec89fb3325cdb5
29,725
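A usage sketch for auth_set with a toy SHA-256 pairing function; the leaves and toy_hashfun here are hypothetical stand-ins for real hashed secret-key components:

import hashlib

def toy_hashfun(left, right):
    # 2n -> n: hash the concatenation of the two child nodes
    return hashlib.sha256(left + right).digest()

height = 3
leaves = [bytes([i]) * 32 for i in range(2 ** height)]  # 8 leaf hashes
for (h, i), digest in auth_set(leaves, [(0, 5), (1, 3), (2, 0)], height, toy_hashfun):
    print(h, i, digest.hex()[:16])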
def _var_names(self):
    """Covers more activations than act_names."""
    predictor = self
    predictor.retain(True)
    x = self.xp.random.randn(16, 3, 32, 32).astype('f')
    # t = self.xp.random.randint(0, 1, size=(16,))
    # loss = self(x, t)
    loss = self(x)
    names = [name for name, _ in predictor.namedvars()]
    del loss
    predictor.retain(False)
    return names
eed797a5865bd3625156ea8c0474c5ccf0f40f0b
29,726
import subprocess


def git(*args, err_ok=False, stderr_to_stdout=False):
    """Runs a git command, returning the output."""
    stderr = subprocess.DEVNULL if err_ok else None
    stderr = subprocess.STDOUT if stderr_to_stdout else stderr
    try:
        return (
            subprocess.check_output(["git"] + list(args), stderr=stderr)
            .decode("utf-8")
            .strip()
        )
    except subprocess.CalledProcessError:
        if not err_ok:
            raise
        return ""
b905f1a6cd5d081785fbb897a89a59e4d544a47f
29,727
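A usage sketch, assuming this runs inside a git work tree:

branch = git("rev-parse", "--abbrev-ref", "HEAD")
dirty = git("status", "--porcelain")
print(branch, "dirty" if dirty else "clean")

# With err_ok=True a failing command (e.g. a repo with no tags yet) returns "" instead of raising:
described = git("describe", "--tags", err_ok=True)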
def read_credentials(filepath='.yelp/credentials'):
    """Read credential file and return dictionary of important information."""
    with open(filepath, 'r') as f:
        contents = f.readlines()
    credentials = {}
    key_items = ['client_id', 'api_key']
    for line in contents:
        for item in key_items:
            if item in line:
                credentials[item] = line.split('=')[1].strip()
    return credentials
998ab1a770fb2c819757869772214fad89c7e13a
29,728
def get_ns(tag):
    """
    Extract namespace.

    This function is opposite to get_local_name, in that it returns the
    first part of the tag: the namespace.

    Parameters
    ----------
    tag : str
        Tag to process.
    """
    p_open = tag.find('{')
    p_close = tag.find('}')
    if p_open != -1 and p_close != -1:
        return tag[p_open+1:p_close]
    else:
        return ''
d10d1e8d2a0a89d5aadd9fd4bf509a0e6e49d49e
29,729
import ast


def convert_types_in_dict(xml_dict):
    """
    Evaluates all dictionary entries into Python literal structures, as a
    dictionary read from an XML file is always strings. If a value cannot be
    converted, it is passed through as-is.

    :param xml_dict: Dict - Dictionary of XML entries
    :return: Dict - Dictionary with converted values
    """
    out = {}
    for el in xml_dict:
        try:
            out[el] = ast.literal_eval(xml_dict[el])
        except (ValueError, SyntaxError):
            # literal_eval raises SyntaxError for plain, unquoted text
            out[el] = xml_dict[el]
    return out
88e0030c2e1612802fd13c46c4ba6d7a1a2f8885
29,730
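A usage sketch showing how string values read from XML are evaluated back into Python literals where possible:

raw = {"n": "42", "ratio": "0.5", "flags": "[1, 2, 3]", "name": "plain text"}
print(convert_types_in_dict(raw))
# {'n': 42, 'ratio': 0.5, 'flags': [1, 2, 3], 'name': 'plain text'}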
import os


def get_access_token(sandbox=False):
    """Get the upload key, stripping whitespace."""
    # Use a context manager so the file handle is closed promptly
    filename = ".sandbox_id" if sandbox else ".zenodo_id"
    with open(os.path.join(os.environ["HOME"], filename), "r") as f:
        return f.read().strip()
7c8a61823120058674cf183d213f45810b89ca2c
29,731
import os


def dir_from_path(path):
    """If path points to a file, strip the file name and return the containing directory."""
    if not os.path.isdir(path):
        path = os.path.dirname(path)
    return path
05fa4d99d623d11f532d181ff0bad2ea1a05db4e
29,732
def nvmf_set_max_subsystems(client, max_subsystems=None):
    """Set NVMe-oF target options.

    Args:
        max_subsystems: Maximum number of NVMe-oF subsystems (e.g. 1024)

    Returns:
        True or False
    """
    params = {}

    params['max_subsystems'] = max_subsystems
    return client.call('nvmf_set_max_subsystems', params)
5c41028b040f055c2ba2fea8dec05e1744f46310
29,734
def get_parameters(parameters):
    """Return confidence and support, falling back to defaults when they are not given in parameters."""
    confidence = parameters.get("min_confidence", 0.75)
    support = parameters.get("min_support", 2)
    return confidence, support
e619fca63833651b1625a45ae2cfda806d227cd1
29,735
import os


def format_stacktrace(stack):
    """Format a stack trace so it is easier to read.

    * Reduce paths to their basename component
    * Truncate the part of the stack after the check failure
    """
    lines = []
    for f in stack:
        fname = os.path.basename(f[0])
        line = "\t%s:%d\t%s" % (fname + "::" + f[2], f[1], f[3])
        lines.append(line)
    return lines
3868ae40e81fc9867c11132f97630ae4c058851b
29,736
import math


def ZScore_GeneGo(n, N, n1, n2):
    """Each subnetwork is associated with a Z-score which ranks the
    subnetworks according to saturation with the objects from the initial
    gene list. The Z-score ranks the subnetworks of the analyze network
    algorithm with regard to their saturation with genes from the experiment.
    A high Z-score means the network is highly saturated with genes from the
    experiment. The formula for the Z-score is:

        n (white in picked), N (total), n1 (white), n2 (picked, my gene list)
        Z = (n - n1*n2/N) / sqrt(n1*n2/N * (1 - n2/N) * (1 - (n1-1)/(N-1)))
          = (n*N - n1*n2) / sqrt(n1*n2*(N-n1)*(N-n2)/(N-1))

    Notice this formula is symmetrical for n1 and n2.
    """
    r = math.sqrt(n1*n2*(N-n1)*(N-n2)/(N-1))
    if r < 1e-100:
        return 0.0
    return (n*N*1.0 - n1*n2)/r
92c8a46ffad5a3e218c45826423fa3ee31e4fee5
29,737
import functools


def to_pagination():
    """Decorator to convert the result query into a pagination object.

    :return: Decorated function
    :rtype: fun
    """
    def decorator_add_sorting(func):
        @functools.wraps(func)
        def decorator(*args, **kwargs):
            """Converts the query into a pagination object."""
            query_args = args[0]
            per_page = query_args.pop('per_page')
            page = query_args.pop('page')
            query = func(*args, **kwargs)
            return query.paginate(page, per_page)
        return decorator
    return decorator_add_sorting
112021416a82735ef9af7f149cd959f2d3fbe004
29,740
# The fallback must happen at import time: once `perf_counter` is imported,
# calling it can never raise ImportError, so the original in-function
# try/except was dead code.
try:
    from time import perf_counter as _clock
except ImportError:  # Pythons older than 3.3 have no perf_counter
    from time import time as _clock


def timer():
    """Multiplying is done to return millis and keep backwards compatibility."""
    return int(round(_clock() * 1000))
7420b19387147cc2d1ba20ccbb0dbe97160dd6f2
29,743
def returns(k, df):
    """
    Computes the percentage returns over a k-day period
    (the formula is a simple return, not a log return).

    Arguments:
    k -- window size
    df -- yahoo finance dataframe

    Returns:
    Percentage returns over a k-day period
    """
    return ((df.open / df.adj_close.shift(k)) - 1) * 100
1190ec7bd6b366049caebde7ec180343d6f96a9d
29,744
def read_acl_file(filename):
    """Read contents of given acl file.

    Args:
        filename: name of file to be read for acl information

    Returns:
        list: list containing ACL entries
    """
    with open(filename, 'r') as f:
        content = f.readlines()

    # Parse
    acl = []
    for entry in content:
        if not entry.startswith("#"):
            acl.append(entry.strip())
    return acl
9fb3e08bc35db72f90ee586f26de2fca04e4a66b
29,745
def is_status_complete(status: str) -> bool:
    """Check if the stack status is a completed create or update."""
    return status in ("CREATE_COMPLETE", "UPDATE_COMPLETE")
d134f9ad2c8f106f74aef226abebfeedec381f9f
29,746
import os


def get_version():
    """Get the package version from __init__.py.

    Modified from this source:
    https://packaging.python.org/guides/single-sourcing-package-version/
    """
    package_dir = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(package_dir, "astropaint", "__init__.py"), "r") as f:
        for line in f.readlines():
            if line.startswith('__version__'):
                delim = '"' if '"' in line else "'"
                return line.split(delim)[1]
    raise RuntimeError("Package version not found!")
9aa5fe0ab9498aea035b36b6d1c6a1d579e38145
29,748
import math


def create_tsp_data(rt):
    """Stores the distance matrix for the stops on the route."""
    data = {}
    data['time_matrix'] = []
    row = []
    for i in range(len(rt.stop)):
        for j in range(len(rt.stop)):
            if rt.stop[i].key != rt.stop[j].key:
                row.append(int(math.floor(
                    rt.travel_time_dict[rt.stop[i].key][rt.stop[j].key] * 10
                    + rt.stop[i].planned_service_time * 10)))
            else:
                row.append(0)  # diagonal elements of the matrix are set to zeros
        data['time_matrix'].append(row)
        row = []
    data['service_time'] = []
    data['time_windows'] = []
    for i in range(len(rt.stop)):
        left_end_point = math.floor(rt.stop[i].start_time_window * 10)
        right_end_point = math.floor(rt.stop[i].end_time_window * 10
                                     - rt.stop[i].planned_service_time * 10)
        data['time_windows'].append((int(left_end_point), int(right_end_point)))
        data['service_time'].append(rt.stop[i].planned_service_time)
    data['num_vehicles'] = 1
    data['depot'] = 0
    return data
0af2a3f3bfe9298992ec7467a93cdeafb711720f
29,749
import random


def next_household_ages(household_list):
    """
    :param household_list: complete list of households
    :return: randomly select a type of household from a distribution suitable
        to City, and return the list of the ages of the people in that
        household
    """
    return random.choice(household_list)
935ecbda9e0e725bf144b9026041602e0a659658
29,750
import argparse


def create_arg_parser():
    """Creates argument parser.

    Returns:
        argparse.ArgumentParser: Parser to fetch arguments from terminal
    """
    parser = argparse.ArgumentParser(
        description="Welcome to Homeless Network Server! (run to start server)",
        prog="HomelessNet")
    parser.add_argument('--init', dest='run_init', action='store_true',
                        help="Fetch tweets, load ES index, start the server")
    parser.add_argument('--esload', dest='run_es_load', action='store_true',
                        help="Fetch tweets from twitter api and load ES index")
    return parser
f612c49cff194c0ee79072cc80ca92bb9acd1ee7
29,751
def multiply(value, arg):
    """
    Multiplies the arg and the value.

    :param value:
    :param arg:
    :return:
    """
    if value:
        value = "%s" % value
        if type(value) is str and len(value) > 0:
            return float(value.replace(",", ".")) * float(arg)
    return None
66b9785dd7f142e1831793662749e74cceb9c93d
29,752
import math


def mapping_options(target_line_angle, step_angle):
    """Takes the target line slope angle and the given rule step angle, and
    returns the two lines that intersect the target line at the step angle."""
    # first get base step angle slopes (currently unused)
    base = [math.tan(math.radians(step_angle)),
            math.tan(math.radians(-step_angle))]
    # now construct other lines and give data, in form [line angle 1, line angle 2]
    lines = [math.tan(math.radians(step_angle + target_line_angle)),
             math.tan(math.radians(target_line_angle - step_angle))]
    return lines
38a97e3616df8046e47c70f2896290ffbc61c072
29,753
import torch


def create_loss_mask(inp):
    """Mask for the loss function."""
    mask = torch.all(inp == 0, dim=-1).float()
    return mask[:, 1:]
d46f41c5e0e0afa380474b4f6af66eba7a3fec3b
29,754
def root_generator(folder, build, variant="common"):
    """
    Generate roots for the SHAxxx hash lookup URLs.

    :param folder: Dictionary of variant: loader name pairs.
    :type folder: dict(str: str)

    :param build: Build to check, 3 letters + 3 numbers.
    :type build: str

    :param variant: Autoloader variant. Default is "common".
    :type variant: str
    """
    # Priv specific
    privx = "bbfoundation/hashfiles_priv/{0}".format(folder[variant])
    # DTEK50 specific
    dtek50x = "bbSupport/DTEK50" if build[:3] == "AAF" else "bbfoundation/hashfiles_priv/dtek50"
    # DTEK60 specific
    dtek60x = dtek50x  # still uses dtek50 folder, for some reason
    # Pack it up
    roots = {"Priv": privx, "DTEK50": dtek50x, "DTEK60": dtek60x}
    return roots
394d824ce1c87eb3ea32d2ce1b0bb3ad72ef648d
29,755
import argparse


def parse_arguments(args_to_parse):
    """Parse CLI arguments."""
    descr = 'Train a baseline model'
    parser = argparse.ArgumentParser(description=descr)

    general = parser.add_argument_group('General settings')
    general.add_argument('name', type=str,
                         help="The name of the experimental directory - used for saving and loading.")
    general.add_argument(
        '--input-data-dir', type=str, required=True,
        help="The name of the directory from which to load the pre-processed data",
    )
    general.add_argument(
        "--stemmer-path", type=str, required=True,
        help="Path to the SALAMA stemming dictionary",
    )
    general.add_argument(
        '--model', type=str, default='tf-idf',
        choices=['tf-idf', 'count', 'doc2vec', 'fasttext'],
        help='Select the model type to use before feeding into a logistic regression layer',
    )
    general.add_argument("--seed", type=int, default=12321,
                         help='Random seed for reproducibility')

    training = parser.add_argument_group('Training settings')
    training.add_argument(
        '--train-set-label-proportion', type=float, default=0.2,
        choices=[0.01, 0.05, 0.1, 0.2],
        help='Ratio of nodes in the training set which we keep labelled',
    )

    # CLI options of the form `--doc2vec-XXXX` pertain to doc2vec
    training.add_argument(
        '--doc2vec-epochs', type=int, default=10,
        help="The number of epochs to run when training Doc2Vec",
    )
    training.add_argument(
        '--doc2vec-feature-dims', type=int, default=300,
        help="The Doc2vec feature vector size",
    )
    training.add_argument(
        '--doc2vec-dm', type=int, choices=[0, 1], default=1,
        help="The training regime to use for Doc2Vec: Distributed Memory (1) or Distributed Bag of Words (0)",
    )

    return parser.parse_args(args_to_parse)
24dc112cdb8b4ae46a8838c91f30b98355fea47f
29,759
def countDigits(n):
    """Count the number of digits in a given number "n"."""
    num = len(str(abs(n)))
    return num
b8ecf5117f45a425b1bb0ced8b01f209cfdaca20
29,763
def safe_issubclass(cls, classinfo):
    """As the builtin issubclass, but returns False instead of a TypeError
    if the first argument is not a class."""
    try:
        return issubclass(cls, classinfo)
    except TypeError:  # cls is not actually a class
        return False
4e4218b51824d68a58f7aeca360845bbb12ba40a
29,764
def calculate_aggregates_day_wise(docs_df):
    """
    Takes a Pandas data frame with index=published_date and columns
    num_occurrences and arxiv_identifier as input, and calculates the number
    of unique and total occurrences by grouping by published_date and
    calculating the count and sum on the column num_occurrences. The
    aggregate results are suitably renamed and the published_date index is
    reset so that it becomes a column in the output dataframe.

    NOT USED CURRENTLY
    """
    agg_df = docs_df.groupby('published_date').num_occurrences.agg(['sum', 'count']).rename(
        columns={'sum': 'total_occurrences', 'count': 'unique_occurrences'}).reset_index()
    # agg_df.sort_values(by='total_occurrences', ascending=False)
    return agg_df
adf67c53c6366d524bc68ae298472cbff8352091
29,765
def get_news_url(tweet):
    """
    Searches for and returns the link in the tweet.

    Args:
        tweet: tweet as tweepy object
    Returns:
        url if it exists
    """
    print("Searching for URL in the tweet..")
    try:
        return tweet.entities.get('urls')[0].get('expanded_url')
    except (AttributeError, IndexError, TypeError):
        # narrowed from a bare except: the tweet has no url entity
        print("Url is missing..")
        print("Tweet: {}".format(tweet))
        return None
b18f6011ca7eae7d114c65ac593d7af03df8390c
29,768
from typing import Optional


def doc_to_doc_string(doc: Optional[str], indent_level: int = 0) -> str:
    """Generate a documentation string from a schema salad doc field."""
    lead = " " + " " * indent_level + "* "
    if doc:
        doc_str = "\n".join([f"{lead}{line}" for line in doc.split("\n")])
    else:
        doc_str = ""
    return doc_str
efd0237f5d39660459e8b736e74ec24bc665ef77
29,770
def safe_div(x, y):
    """
    This function divides two numbers and avoids division by zero.

    :param x: x-value
    :param y: y-value
    :return: result
    """
    if y < 1.e-5:
        y = 1.e-5
    if x < 1.e-5:
        x = 1.e-5
    return x / y
1ce30f2f9f5616d8ff8b4c7f29aad3bcbd51201d
29,771
def get_evinfo(act_ptrn):
    """
    [Overview]
        Returns the event info used in tests, per action-execution pattern.
    [Arguments]
        act_ptrn : 1=OPERATION_ID, 2=SERVER_LIST, 3=MENU_ID
    """
    event_info = '{"EVENT_INFO":["mas-pj-dev","httpd"]}'

    if act_ptrn == 3:
        event_info = '{"EVENT_INFO":["mas-pj-dev","httpd","date"]}'
    elif act_ptrn == 4:
        event_info = '{"EVENT_INFO":["18:09 TEST825 E (対象ノード= mas-pj-dev )( SystemScope(0000):pcheck001[999]: Process [/opt/sample/bin/aaa ,pid=7203]Down TEST001 )TEST002"]}'

    return event_info
8f3cec9d5a085f203f3cde317c7d148686c94f9f
29,774
def roi_corners(roi):
    """roi_corners = [top_left, top_right, bottom_left, bottom_right]"""
    return [
        roi[0],
        [roi[1][0], roi[0][1]],
        [roi[0][0], roi[1][1]],
        roi[1]
    ]
9eaef99257411f3e4bb06c21647ee116305e715a
29,775
def unzip(zipped):
    """
    Unzip a zipped list.

    :param list-of-tuple zipped: list of tuples to be disaggregated
    :returns: list of tuples

    Example
    -------
    >>> unzip([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
    [(1, 4, 7), (2, 5, 8), (3, 6, 9)]
    """
    return list(zip(*zipped))
0ac5d80574eaeb5e84417dbfed9e617cde60d101
29,777
def getCollectionKeys(collection):
    """Get a set of keys from a collection."""
    keys_list = []
    # Limit to 300 documents: otherwise this pulls all records just to get
    # the schema, which is a bit wasteful. It may miss some field names,
    # but it's a good compromise.
    collection_list = collection.find().limit(300)
    for document in collection_list:
        for field in document.keys():
            keys_list.append(field.lower())
    keys_set = set(keys_list)
    return keys_set
edc02521e87ec40a4731887cb05d0df6afbd2b49
29,778
def can_manage_content(user, content):
    """Returns True if user is staff or instructor (owner)."""
    if user.is_authenticated():
        owner = user == content.course.owner
        if user.is_staff or owner:
            return True
    return False
bb4d2209155b5df1458f3194737d25a4a9b1a7c3
29,779
from typing import List
from typing import Tuple


def extract_node_waypt(way_field: List[Tuple[str, str]]) -> int:
    """
    Given a list with a reference node such as [('ref', '0')], extract out
    the lane ID.

    Args:
        way_field: key and node id pair to extract

    Returns:
        node_id: unique ID for a node waypoint
    """
    key = way_field[0][0]
    node_id = way_field[0][1]
    assert key == "ref"
    return int(node_id)
564a738d0fdecb5230b9bb757764f4463dee85e0
29,780
import functools
import traceback
import sys


def suppress_broken_pipe_msg(f):
    """
    When using the CLI as part of a script pipeline, we want to gracefully
    handle the next command closing the pipe early (e.g. head).

    This is a workaround for the fact that python prints an error message to
    the console even when an error is correctly handled.
    https://stackoverflow.com/questions/14207708/ioerror-errno-32-broken-pipe-python
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except SystemExit:
            raise
        except Exception:
            traceback.print_exc()
            exit(1)
        finally:
            try:
                sys.stdout.flush()
            finally:
                try:
                    sys.stdout.close()
                finally:
                    try:
                        sys.stderr.flush()
                    finally:
                        sys.stderr.close()
    return wrapper
752c4b8fc878e2ae2cb735f0e259d703653c7531
29,781
from typing import List


def create_subdict(data: List):
    """
    Return the data in list form as a dictionary, a starting point to which
    model parameters can be appended.

    Parameters
    ----------
    data: List
        Data (potentially pca component output in 2d form).

    Return
    ------
    Dict: Dictionary with 'data' key.
    """
    return {'data': data}
6850588d1b1eeae69265aa1a63377ee2fa759b80
29,782
def get_suggestion_string(sugg):
    """Return the suggestion list as a string."""
    sugg = list(sugg)
    return ". Did you mean " + ", ".join(sugg) + "?" if sugg else ""
7eb93d747818b66f5996241c993fbbe2ab638dd7
29,783
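A usage sketch:

msg = "NameError: name 'lst' is not defined"
print(msg + get_suggestion_string(["'list'", "'last'"]))
# NameError: name 'lst' is not defined. Did you mean 'list', 'last'?
print(repr(get_suggestion_string([])))  # '' when there are no suggestions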
import importlib


def str_to_class(s):
    """Alternate helper function to map string class names to module classes."""
    lst = s.split(".")
    klass = lst[-1]
    mod_list = lst[:-1]
    module = ".".join(mod_list)
    try:
        # importlib.import_module returns the leaf module; a bare __import__
        # would return the top-level package for dotted paths, so the
        # getattr below would fail
        mod = importlib.import_module(module)
        if hasattr(mod, klass):
            return getattr(mod, klass)
        else:
            return None
    except ImportError:
        return None
7ffd66b1a9032980fdc09fbec86fd914b84d52c7
29,785
def with_(context, *args):
    """:yaql:with

    Returns new context object where args are stored with 1-based indexes.

    :signature: with([args])
    :arg [args]: values to be stored under appropriate numbers $1, $2, ...
    :argType [args]: chain of any values
    :returnType: context object

    .. code::

        yaql> with("ab", "cd") -> $1 + $2
        "abcd"
    """
    for i, t in enumerate(args, 1):
        context[str(i)] = t
    return context
a85dea242ee315e5c238084cc9a563317ddfe976
29,786
def removeDoubleVertexesAlternative(V):
    """
    Alternative removal of duplicate vertices. Hopefully more memory efficient.
    """
    X = range(len(V))
    # Vs = [v for (v, x) in VIsorted]
    # Is = [x for (v, x) in VIsorted]
    Vs = []
    Is = [0]*len(V)
    prevv = None
    i = 0
    for [v, x] in sorted(zip(V, X)):
        if v == prevv:
            # prev index was increased
            Is[x] = i - 1
        else:
            Vs.append(v)
            Is[x] = i
            i = i + 1
        prevv = v
    return Vs, Is
d9722e481b38bae0703f5f63982e39d5c2364bd0
29,787
def get_key(text):
    """Return keyword: the first word of text, isolating keywords followed
    by '(' and ':'."""
    t = text.replace('(', ' (').replace(':', ' :').strip()
    return t.split(' ', 1)[0].strip()
e27e76b97e6b9247f24bd62c074787f6d140f688
29,788
def solution_two(numbers: list[int]) -> list[int]:
    """
    timeComplexity: BigO(2n)
    spaceComplexity: BigO(n)
    """
    total = 1
    for n in numbers:
        total *= n
    result = []
    for n in numbers:
        result.append(total // n)
    return result
ceba8dddf112299b9856b2099fa6f679db28f225
29,789
from typing import Union
from typing import List


def read_file(file_path: str, split: bool = False) -> Union[str, List[str]]:
    """
    Reads a text file.

    >>> from snakypy import helpers
    >>> file = '/tmp/my_file.txt'
    >>> helpers.files.create_file('My content file', file, force=True)
    True
    >>> helpers.files.read_file(file)
    'My content file'

    Args:
        file_path (str): You must receive the full/absolute file path.

        split (bool): If this option is True, a list will be returned where
                      the breaks will be made using line skips.
                      (default: {False})

    Returns:
        [str|list]: By default it returns a string. If the option split=True,
                    a list of line breaks will be returned.
    """
    try:
        with open(file_path) as f:
            if split:
                return f.read().split("\n")
            return f.read()
    except FileNotFoundError as err:
        raise FileNotFoundError(f'>>> File "{file_path}" does not exist. {err}')
af2429f9d696a693b89c0fa33200e453906ee0c8
29,791
def max_sublist(a_list):
    """
    Kadane's Algorithm

    >>> max_sublist([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    (6, 3, 6)

    >>> max_sublist([0, -1, 2, -3, 5, 9, -5, 10])
    (19, 4, 7)

    :param a_list: The list to get the maximum sub-list for.
    :return: The sum from the sublist, the start index, and the end index.
        The last two are for testing.
    """
    max_ending_here = max_so_far = a_list[0]
    current_index = 0
    start_index = 0
    end_index = 0

    for num in a_list:
        max_ending_here = max(0, max_ending_here + num)
        if max_ending_here >= max_so_far:
            end_index = current_index
        if max_ending_here == 0:
            start_index = current_index + 1
        max_so_far = max(max_so_far, max_ending_here)
        current_index += 1

    return max_so_far, start_index, end_index
e535a182e0a0118395dfd6f6e130a42d3c4b051f
29,795
import random


def generate_social_security_number():
    """Randomly generate a social security number. For example: '234 78 9012'

    Update to reflect state, date of birth info.
    Consider: http://www.pnas.org/content/106/27/10975.full.pdf
    """
    number1 = random.randint(1, 999)
    assert number1 > 0
    number2 = random.randint(1, 99)
    assert number2 > 0
    number3 = random.randint(1, 9999)
    assert number3 > 0
    ss_str = str(number1).zfill(3) + ' ' + str(number2).zfill(2) + ' ' + \
        str(number3).zfill(4)
    assert len(ss_str) == 11
    return ss_str
4bdef47477dae586683ad7a2487ca09852baf57b
29,796
def get_fold(dict_actors_per_fold, actor_id):
    """
    Get the fold that the actor belongs to.

    :param dict_actors_per_fold: [dict] Map with the fold number as key
        (ranging from 1 to 5) and the list of actors in that test fold as
        the value of the dict.
    :param actor_id: [int] Id of the actor whose fold we want to obtain.
    """
    for fold_key in list(dict_actors_per_fold.keys()):
        if actor_id in dict_actors_per_fold[fold_key]:
            return fold_key
0b5543ed37cd4fc8bed84a7f269fca04a5d71d01
29,797
def getModel(tsinput):
    """
    This is the wrapper function for all turbulence models implemented in
    the runInput package.

    Parameters
    ----------
    tsinput : :class:`tscfg <.base.tscfg>`
        A TurbSim input object.

    Returns
    -------
    specModel : A subclass of :class:`.specModelBase`
        The appropriately initialized 'spectral model' object specified in
        `tsinput`.
    cohereModel : A subclass of :class:`.cohereModelBase`
        The appropriately initialized 'coherence model' object specified in
        `tsinput`.
    stressModel : A subclass of :class:`.stressModelBase`
        The appropriately initialized 'stress model' object specified in
        `tsinput`.
    """
    # This executes the sub-wrapper function (defined below) specified
    # in the tsinput-object (input file TurbModel line)
    return eval('_' + tsinput['TurbModel'].lower() + '(tsinput)')
c745ca7127b92f47aa13e2965b37f7d57dd6d588
29,798
def filter_cov_id(pid, cov, *df):
    """Filters the passed dataframes, returning only rows that meet the
    passed percentage identity and coverage criteria.
    """
    return tuple([d[(d.percentage_identity > pid) &
                    (d.query_coverage > cov) &
                    (d.subject_coverage > cov)] for d in df])
7373d1c3d54e219cbcc1c307fd9808d924fc3ae2
29,800
def intToBinaryString(integer: int) -> str:
    """Convert an integer to a string representing a big endian binary number

    Parameters
    ----------
    integer : int
        A positive integer

    Returns
    -------
    str
        A string representing a big endian binary number
    """
    rtn = ""
    while integer > 0:
        rtn += str(integer % 2)
        integer = integer // 2
    return rtn[::-1]
5f0d3aba71c7fffaa14725fef0576b72ae17b461
29,803
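A usage sketch; note the function returns '' for 0, which callers must handle:

print(intToBinaryString(10))   # '1010'
print(intToBinaryString(255))  # '11111111'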
def dista10(n):
    """
    Given an integer n, return True if the absolute difference between n and
    100, or between n and 200, is less than or equal to 10.

    dista10(93) -> True
    dista10(90) -> True
    dista10(89) -> False
    """
    if abs(n - 100) <= 10 or abs(n - 200) <= 10:
        return True
    return False
f575451e55a511c436fe3c70c915c3cddc98ddd9
29,804
def filter_capabilities_by_languages(bears, languages):
    """
    Filters the bears capabilities by languages.

    :param bears:     Dictionary with sections as keys and list of bears as
                      values.
    :param languages: Languages that bears are being filtered on.
    :return:          New dictionary with languages as keys and their bears
                      capabilities as values. The capabilities are stored in
                      a tuple of two elements where the first one represents
                      what the bears can detect, and the second one what they
                      can fix.
    """
    languages = set(language.lower() for language in languages)
    language_bears_capabilities = {language: (set(), set())
                                   for language in languages}
    for section_bears in bears.values():
        for bear in section_bears:
            bear_language = (
                ({language.lower() for language in bear.LANGUAGES} | {'all'})
                & languages)
            language = bear_language.pop() if bear_language else ''
            capabilities = (language_bears_capabilities[language]
                            if language else tuple())
            language_bears_capabilities.update(
                {language: (capabilities[0] | bear.can_detect,
                            capabilities[1] | bear.CAN_FIX)}
                if language else {})
    return language_bears_capabilities
3f5661f6d72579b0fcd8ce5b6e055f0fefce3dcc
29,806
import os


def get_config_folders():
    """Read folder config from environment."""
    folders = []
    for env_key, env_val in os.environ.items():
        if env_key.startswith('FOLDER_'):
            folder_id = env_key.split('_', 1)[1]
            folder_name = env_val
            folder_extension = os.getenv(f'EXTENSION_{folder_id}')
            if folder_extension:
                folders.append({
                    'name': folder_name,
                    'extension': folder_extension
                })
    return folders
d18952397fabcad11eac26c9ba28ee3de9ffbd31
29,807
import os


def filename_maker(name: str, path: str, start_date, end_date) -> str:
    """
    Create the name of the main files.

    Returns:
        String with the path to save the file.
    """
    return os.path.join(path, name + start_date.replace('-', '')
                        + '_' + end_date.replace('-', ''))
996683de49edb626f5d2974da3af26030ca6eb46
29,809
def convert_aa_code(three_letter, convert):
    """
    Assumes a string that contains a three letter aminoacid code and
    returns the corresponding one letter code.
    """
    aa_code = {
        'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
        'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
        'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
        'ALA': 'A', 'VAL': 'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M',
    }
    non_canonical = {
        'MSE': 1, 'HYP': 2, 'MLY': 3, 'SEP': 4, 'TPO': 5, 'CSO': 6,
        'PTR': 7, 'KCX': 8, 'CME': 9, 'CSD': 10, 'CAS': 11, 'MLE': 12,
        'DAL': 13, 'CGU': 14, 'DLE': 15, 'FME': 16, 'DVA': 17, 'OCS': 18,
        'DPR': 19, 'MVA': 20, 'TYS': 21, 'M3L': 22, 'SMC': 23, 'ALY': 24,
        'CSX': 25, 'DCY': 26, 'NLE': 27, 'DGL': 28, 'DSN': 29, 'CSS': 30,
        'DLY': 31, 'MLZ': 32, 'DPN': 33, 'DAR': 34, 'PHI': 35, 'IAS': 36,
        'DAS': 37, 'HIC': 38, 'MP8': 39, 'DTH': 40, 'DIL': 41, 'MEN': 42,
        'DTY': 43, 'CXM': 44, 'DGN': 45, 'DTR': 46, 'SAC': 47, 'DSG': 48,
        'MME': 49, 'MAA': 50, 'YOF': 51, 'FP9': 52, 'FVA': 53, 'MLU': 54,
        'OMY': 55, 'FGA': 56, 'MEA': 57, 'CMH': 58, 'DHI': 59, 'SEC': 60,
        'OMZ': 61, 'SCY': 62, 'MHO': 63, 'MED': 64, 'CAF': 65, 'NIY': 66,
        'OAS': 67, 'SCH': 68, 'MK8': 69, 'SME': 70, 'LYZ': 71
    }
    if three_letter in aa_code.keys():
        return aa_code[three_letter]
    elif convert and (three_letter in non_canonical.keys()):
        return non_canonical[three_letter]
    else:
        return '-'
84cf3b1b383f3c6559dee6d212c0e2265c2f97a0
29,811
def expanding_mean(new_value, old_value, time):
    """
    Args:
        new_value: this time step's value
        old_value: aggregate from last time step
        time: current time, zero-indexed

    Returns:
        mean from t = 0 : time
    """
    time_1plus = time + 1
    return (1. / time_1plus) * new_value + (time_1plus - 1.) / time_1plus * old_value
9f701473a9904e48ec042983a8bedc2e27a464ba
29,813
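A usage sketch maintaining a running mean over a stream with zero-indexed time:

values = [2.0, 4.0, 6.0, 8.0]
mean = 0.0
for t, v in enumerate(values):
    mean = expanding_mean(v, mean, t)
print(mean)  # 5.0, the mean of all four values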
def _gradient_normalization(grad, non_zero, centralize_gradients=True, normalize_gradients=True):
    """
    Subtract the mean from the gradient and divide it by its standard
    deviation.

    `non_zero` is a function that takes an input and ensures that it will
    not be zero or negative.
    """
    can_centralize = centralize_gradients and (grad.ndim > 1)
    can_normalize = normalize_gradients and (grad.size > 2)
    if can_centralize or can_normalize:
        # takes into account the fact that the gradient might be 1D
        keepdims = (grad.ndim > 1)
        axis = tuple(range(1, grad.ndim)) if keepdims else None
        # subtract the mean from the gradient
        grad_mean = grad.mean(axis=axis, keepdims=keepdims)
        grad -= grad_mean
        if can_normalize:
            # divide the centralized gradient by its standard deviation
            grad_std = grad.std(axis=axis, keepdims=keepdims)
            grad /= non_zero(grad_std)  # we divide *after* subtracting the mean
        # add the mean back to the gradient if we don't want to centralize it
        if not can_centralize:
            grad += grad_mean
    return grad
82a73e90759ec00e49e4dde1797377362819683d
29,815
from typing import Callable
from typing import Iterable


def sort_by_reversed(key: Callable):
    """Return a new list containing all items from the iterable in
    descending order, sorted by a key.

    >>> sort_by_reversed(lambda x: x % 10)([2231, 47, 19, 100])
    [19, 47, 2231, 100]
    """
    def sort_by_reversed(seq: Iterable):
        return sorted(seq, key=key, reverse=True)
    return sort_by_reversed
711df771751590a6315deefb075d9a436743cacb
29,816
from multiprocessing import Process, Queue
import traceback
import functools


def pl_multi_process_test(func):
    """Wrapper for running multi-processing tests."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        queue = Queue()

        def inner_f(queue, **kwargs):
            try:
                func(**kwargs)
                queue.put(1)
            except Exception:
                traceback.print_exc()
                queue.put(-1)

        proc = Process(target=inner_f, args=(queue,), kwargs=kwargs)
        proc.start()
        proc.join()
        result = queue.get()
        assert result == 1, 'expected 1, but returned %s' % result
    return wrapper
4b75d5734ae1f2ee57111e2bec9b5fef40c12bad
29,817
import itertools


def is_sorted(iterable, *, key=None, reverse=False, strict=False):
    """Returns true if iterable is sorted.

    Parameters
    ----------
    iterable : iterable
        The iterable to check for sorted-ness.
    key : x -> y, optional
        Apply mapping function key to iterable prior to checking. This can
        be done before calling, but this ensures identical calls as sorted.
    reverse : bool, optional
        `key` and `reverse` function as they do for `sorted`.
    strict : bool, optional
        Whether elements must be strictly increasing (or decreasing).
    """
    if key is not None:
        iterable = map(key, iterable)
    ait, bit = itertools.tee(iterable)
    next(bit, None)  # Don't throw error if empty
    if strict and reverse:  # pylint: disable=no-else-return
        return all(a > b for a, b in zip(ait, bit))
    elif reverse:
        return all(a >= b for a, b in zip(ait, bit))
    elif strict:
        return all(a < b for a, b in zip(ait, bit))
    else:
        return all(a <= b for a, b in zip(ait, bit))
682d0026193a3189bcee7e693f8ed28d5c18d10b
29,819
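A few usage examples:

print(is_sorted([1, 2, 2, 3]))               # True
print(is_sorted([1, 2, 2, 3], strict=True))  # False: ties fail the strict check
print(is_sorted([3, 2, 1], reverse=True))    # True
print(is_sorted(["b", "A"], key=str.lower))  # False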
from typing import Dict
from typing import Tuple


def _has_all_valid_descriptors(peak_descriptors: Dict[str, float],
                               filters: Dict[str, Tuple[float, float]]) -> bool:
    """
    Check that the descriptors of a peak are in a valid range.

    aux function of get_peak_descriptors.

    Parameters
    ----------
    peak_descriptors : dict
        Dictionary from descriptors names to values.
    filters : dict
        Dictionary from descriptors names to minimum and maximum acceptable
        values.

    Returns
    -------
    is_valid : bool
        True if all descriptors are inside the valid ranges.
    """
    res = True
    for descriptor, (lb, ub) in filters.items():
        d = peak_descriptors[descriptor]
        is_valid = (d >= lb) and (d <= ub)
        if not is_valid:
            res = False
            break
    return res
c797949c6faa5f401abfaf020c64b55e2e41c544
29,820
import torch


def dist2(feat_a, feat_b):
    """
    Args:
        feat_a: the feature of image_a
        feat_b: a list of features, on a list of images
    Return:
        dist: a list of distances between feat_a and each feature in feat_b
    """
    dist = []
    feat_a = torch.FloatTensor(feat_a)
    for i in range(len(feat_b)):
        feat_i = torch.FloatTensor(feat_b[i])
        dist_i = torch.norm(feat_a - feat_i, 2)
        dist.append(dist_i)
    return dist
c26ef4f337f0d4248f61f03cbf962b0187a4f7c6
29,821
import argparse


def validate_color_temperature(s: str) -> int:
    """Validate color temperature argument."""
    try:
        value = int(s)
    except ValueError:
        raise argparse.ArgumentTypeError(f"{s} is not an integer")
    if value < 2900 or value > 7000:
        raise argparse.ArgumentTypeError(
            "color temperature must be between 2900 and 7000"
        )
    if value % 100 != 0:
        raise argparse.ArgumentTypeError("color temperature must be divisible by 100")
    return value
2f462e7faa32bba34e4b26017e844edd61644904
29,824
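A usage sketch wiring the validator into an argparse parser (the --temp flag is hypothetical):

parser = argparse.ArgumentParser()
parser.add_argument("--temp", type=validate_color_temperature)
print(parser.parse_args(["--temp", "3500"]).temp)  # 3500
# "--temp 3550" or "--temp 8000" would exit with an ArgumentTypeError message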
import re


def rlist_to_int(string):
    """Takes an rlist string as input, and outputs a corresponding integer."""
    rlist = 0
    string = re.sub(r"r|R", "", string)
    for m in re.finditer(r"[^,]+", string):
        args = m.group().split("-")
        if len(args) == 1:
            lo, hi = args*2
        else:
            lo, hi = args
        rlist |= 2**(int(hi) + 1) - 2**int(lo)
    return rlist
782f01cdaee0e2c51ff6dfcd281c5d83e1a3e98b
29,825
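A usage sketch: an ARM-style register list maps to a bitmask.

print(bin(rlist_to_int("r0-r3, r5")))  # 0b101111: bits 0-3 and bit 5 set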
def interpolate_template(template, context):
    """Interpolate context into a template
    (i.e. replace things in strings without % replacement).

    Args:
        template (str) : String to fill with context
        context (dict) : Dictionary of variables

    Returns:
        str : Populated template
    """
    populated = template
    for key, value in context.items():
        _key = '{%s}' % key
        populated = populated.replace(_key, str(value))
    return populated
6c9151075c9fa836b340ee163452de47eec8fe2c
29,826
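A usage sketch:

tmpl = "Deploying {app} to {env}..."
print(interpolate_template(tmpl, {"app": "api", "env": "staging"}))
# Deploying api to staging...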
def accepts(source):
    """Test if source matches a Twitter handle."""
    # If the source type is twitter, assume a yes
    if source["type"] == "twitter":
        return True
    # Default to not recognizing the source
    return False
03ce273a924736ac68d73b2f68a4dada9b412c8a
29,827
def get_qualifiers(qualifiers):
    """
    Get qualifiers, if they exist, and return a dict of
    "qualifier_name: data" pairs.
    """
    qualifiers_dict = dict()
    if qualifiers is not None:
        qualifiers = qualifiers if isinstance(qualifiers, list) else [qualifiers]
        for qualifier in qualifiers:
            qualifiers_dict.update({qualifier.get('@name'): qualifier.get('#text')})
        return qualifiers_dict
    else:
        return None
d0f354d709c11ea40b212e2baaaef5762a3ad5c2
29,828
def make_enum_pair(ele, till, delimiter=", "):
    """
    Create a list of string pairs with consecutive numbering as follows:
    make_enum_pair('P%d p%d', 3) -> 'P1 p1, P2 p2, P3 p3'
    """
    return delimiter.join(ele % (i + 1, i + 1) for i in range(till))
66b9410f307027ef151dd81edb7b29998ef65628
29,829
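Usage examples:

print(make_enum_pair("P%d p%d", 3))          # 'P1 p1, P2 p2, P3 p3'
print(make_enum_pair("arg%d=v%d", 2, "; "))  # 'arg1=v1; arg2=v2'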
def convert_dtw_struc_dist(distances, start_layer=1):
    """
    Convert per-layer distances into cumulative distances from start_layer on.

    :param distances: dict of dict
    :param start_layer:
    :return:
    """
    for vertices, layers in distances.items():
        keys_layers = sorted(layers.keys())
        start_layer = min(len(keys_layers), start_layer)
        for layer in range(0, start_layer):
            keys_layers.pop(0)
        for layer in keys_layers:
            layers[layer] += layers[layer - 1]
    return distances
8c62ab92e565509625bbef293bcc4d05870e1754
29,830
def average(keyword, values):
    """
    Calculate the average of all values at values[*].keyword and return it.

    :param keyword: the key of the value you care about
    :param values: [{key: value}, ...] array of dicts, values assumed numeric
    :return: single averaged value
    """
    average = 0.0
    for val in values:
        average += float(val[keyword])
    if len(values) > 0:
        average = average / len(values)
    return average
aed36e90a485e4f433de7330f920167dd0e558d5
29,831
import os


def _file_size_is_valid(sequence_file, minimum_file_size):
    """
    Performs an os.path.getsize operation on the sequence file(s) on the
    sequence_file object. If any file is less than the minimum, return
    False, else return True.

    :param sequence_file: SequenceFile object
    :param minimum_file_size: integer representing filesize in KB
    :return: boolean
    """
    for file_name in sequence_file.file_list:
        file_size_kb = os.path.getsize(file_name) / 1024
        if file_size_kb <= minimum_file_size:
            return False
    return True
edb2d7ed33aedc7cef18d63a2b468e6982aa7897
29,833
def can_collapse(row):
    """
    Returns True if the given row can be collapsed to the left.
    Can also be a column.
    """
    for a, b in zip(row[1:], row):
        if a == 0:
            continue
        if b == 0 or a == b:
            return True
    return False
50a26acd3ea27049fd2be3d60860a1f10bf999ed
29,835
def transition_required(*transitions):
    """
    Permission factory that requires that the current user has access to at
    least one of the transitions and at the same time has the ``required``
    permission (or factory). If the required permission is not set, it is
    considered true. If it is callable, it is called first to get the
    Permission (an object with a .can() method).
    """
    def factory(record, *_args, **_kwargs):
        def can():
            available_user_transitions = record.available_user_transitions()
            for t in transitions:
                if t in available_user_transitions:
                    return True
            return False

        return type('TransitionRequiredPermission', (), {'can': can})

    return factory
96e273057360c1c03ea78a6f1618b7900abbb128
29,836
def get_count(inputStr):
    """
    Return the number (count) of vowels in the given string.

    We will consider a, e, i, o, and u as vowels for this Kata. The input
    string will only consist of lower case letters and/or spaces.

    :param inputStr: input string value.
    :return: the number (count) of vowels in the given string.
    """
    return sum(1 for i in inputStr if i in ["a", "e", "i", "o", "u"])
f4700b0c93175b4b3553f16e27384a3ee3481c60
29,838
import random


def random_bit_value():
    """Returns a random bit."""
    return random.randint(0, 1)
3627c3ef2246f7913ce14f5ebf31b7b56ebff461
29,840
def fractional_basal_area_by_species(data):
    """For a group of trees, calculate the fraction of total basal area
    represented by each species."""
    # cast to str so can store sparsely :)
    fractional_ba = (
        (
            data.groupby(data["SPCD"].astype(str)).unadj_basal_area.sum()
            / data.unadj_basal_area.sum()
        )
        .round(4)
        .dropna()
    )
    return fractional_ba.to_dict()
ea8e8dc4ee75c6756eb48acd18e0e22d8198d6f2
29,841
def subDict(somedict, somekeys, default=None):
    """Returns the subset of a dictionary with keys from somekeys."""
    return dict([(k, somedict.get(k, default)) for k in somekeys])
17b95ee28986ca2c2f02e3405fc71c76a14fbbc9
29,842
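A usage sketch; missing keys fall back to `default`:

cfg = {"host": "localhost", "port": 8080, "debug": True}
print(subDict(cfg, ["host", "port", "timeout"], default=30))
# {'host': 'localhost', 'port': 8080, 'timeout': 30}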
def qset_cmb_box(cmb_box, string, data=False):
    """
    Set combobox to the index corresponding to `string` in a text field
    (data = False) or in a data field (data=True). When `string` is not
    found in the combobox entries, select the first entry. Signals are
    blocked during the update of the combobox.

    Returns: the index of the found entry
    """
    if data:
        idx = cmb_box.findData(str(string))  # find index for data = string
    else:
        idx = cmb_box.findText(str(string))  # find index for text = string

    ret = idx

    if idx == -1:  # data does not exist, use first entry instead
        idx = 0
    cmb_box.blockSignals(True)
    cmb_box.setCurrentIndex(idx)  # set index
    cmb_box.blockSignals(False)

    return ret
f2a53bca106fe160d04142ebf0fdfb2de966a98d
29,843
def parse_f90idx(tokens, t, prior_t):
    """Parse Fortran vector indices into a tuple of Python indices."""
    idx_end = (',', ')')

    v_name = prior_t
    v_indices = []
    i_start = i_end = i_stride = None

    # Start index
    t = next(tokens)
    try:
        i_start = int(t)
        t = next(tokens)
    except ValueError:
        if t in idx_end:
            raise ValueError('{0} index cannot be empty.'
                             ''.format(v_name))
        elif not t == ':':
            raise

    # End index
    if t == ':':
        t = next(tokens)
        try:
            i_end = 1 + int(t)
            t = next(tokens)
        except ValueError:
            if t == ':':
                raise ValueError('{0} end index cannot be implicit '
                                 'when using stride.'
                                 ''.format(v_name))
            elif not t in idx_end:
                raise
    elif t in idx_end:
        # Replace index with single-index range
        if i_start:
            i_end = 1 + i_start

    # Stride index
    if t == ':':
        t = next(tokens)
        try:
            i_stride = int(t)
        except ValueError:
            if t == ')':
                raise ValueError('{0} stride index cannot be '
                                 'implicit.'.format(v_name))
            else:
                raise

        if i_stride == 0:
            raise ValueError('{0} stride index cannot be zero.'
                             ''.format(v_name))

        t = next(tokens)

    if not t in idx_end:
        raise ValueError('{0} index did not terminate '
                         'correctly.'.format(v_name))

    idx_triplet = (i_start, i_end, i_stride)
    v_indices.append((idx_triplet))
    t = next(tokens)

    return v_indices, t
a3b9f6b3c492e4c31a3c7d3d66c3779ea76d7449
29,846
from typing import Iterator
import itertools


def decimal(start: int = 1) -> Iterator[int]:
    """
    Increments from `start`, e.g. 1, 2, 3, .. 9, 10, 11, etc.

    Args:
        start: The first value to start with.
    """
    return itertools.count(start)
5c0a4b75b45c391597091e83c4b3991ba64d6626
29,847
def lunar_add(n, m):
    """Lunar addition: the digit-wise maximum of the two numbers."""
    sn, sm = str(n), str(m)
    l = max(len(sn), len(sm))
    return int("".join(max(i, j) for i, j in zip(sn.rjust(l, "0"), sm.rjust(l, "0"))))
eb6c62c4a72176615b398e9b4c04535ec9fa3d2e
29,848
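Usage examples, with the shorter number zero-padded on the left:

print(lunar_add(357, 892))  # 897: max(3,8), max(5,9), max(7,2)
print(lunar_add(17, 983))   # 987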
def get_rendered(font, text, color, cache):
    """Simple font renderer that caches renders."""
    if text in cache:
        image = cache[text]
    else:
        image = font.render(text, 0, color)
        cache[text] = image
    return image
13557f705882da5865812a0be86cf8b46b95e202
29,849
def adv_word_probs_for_rev(rev, td):
    """Return adversarial word probabilities for the reviewer."""
    # Get topics for the reviewer
    rev_top_list = td.rev_top[rev.name()]
    wds_prob = {}
    for t_id, t_prob in rev_top_list[:3]:
        # Get words contributing to each topic
        t_wds = td.top_wds[t_id]
        for w, w_prob in t_wds[:10]:
            if w not in wds_prob:
                wds_prob[w] = 0
            # Weight each word by the topic's probability
            prob = t_prob
            wds_prob[w] = max(wds_prob[w], prob)
    s = 0
    for w in wds_prob:
        s += wds_prob[w]
    for w in wds_prob:
        wds_prob[w] = wds_prob[w] / s
    return wds_prob
24baf7bcdb5655881f56c9abb6c889884c926347
29,850
import math


def angle_to(tup1, tup2):
    """The angle to tup2 from tup1, measured against 'straight up' clockwise,
    in radians. Tuples are given in a coordinate system with (0,0) top left,
    (1,0) one step right, (0,1) one step down, and (1,1) down right.

    >>> angle_to((1,1),(1,0))/math.pi
    0.0
    >>> angle_to((1,1),(2,0))/math.pi
    0.25
    >>> angle_to((1,1),(1,2))/math.pi
    1.0
    >>> angle_to((1,1),(-1,-1))/math.pi
    1.75
    """
    x = tup2[0] - tup1[0]
    y = tup1[1] - tup2[1]
    angle_to_positive_x_axis = math.atan2(y, x)
    angle_to_straight_up = (math.pi/2) - angle_to_positive_x_axis
    positive_angle_to_straight_up = angle_to_straight_up % (2*math.pi)
    return positive_angle_to_straight_up
edece575727f69d716ffd217807b4b794a6b7a1c
29,851
def OkErrMiss(o):
    """
    Compute ok, errors, missed from the ResultStructure object.

    Return a triple (ok, err, miss).
    """
    ok = o.truePos
    err = o.resTotal - ok
    miss = o.gtTotal - ok
    return ok, err, miss
11f3958272d1fdbb34621c5d0ad7700193b9a2f9
29,852
def solution(A):
    """
    DINAKAR

    Idea: the xor of two equal numbers produces 0, e.g. x = 3 (011) and
    y = 3 (011) give 000. At the end, the single unpaired number is left in
    the xor variable.

    :return: the value occurring an odd number of times
    """
    xor = 0
    for item in A:
        xor ^= item
    return xor
2f59e3ffdfc9c429b96d1273850c633d90b77f1e
29,853
import re


def filter_partheading(line):
    """
    Removes lines like 'part 4 / The New World'.

    :param line:
    :return:
    """
    line = line.strip()
    m = re.search(r"part \d+ /", line)  # raw string: '/' needs no escape
    if m:
        return None
    return line
fa0e51c60eb19870ef186112b3bcbcd181bc71f0
29,855
import logging


def featurize_data(model, array):
    """
    Given a model and an array, perform error checking and return the
    prediction of the full feature array.

    Parameters:
    ----------
        model : keras.models.Model
            The featurizer model performing predictions
        array : np.ndarray
            The vectorized array of images being converted into features

    Returns:
    --------
        full_feature_array : np.ndarray
            A numpy array containing the featurized images
    """
    # Raise error if the array has the wrong shape
    if len(array.shape) != 4:
        raise ValueError('Image array must be a 4D tensor, with dimensions: '
                         '[batch, height, width, channel]')

    # Perform predictions
    logging.info('Creating feature array.')

    # NOTE: No clue why this is here, it's to make the models not break due
    # to Keras update: https://github.com/keras-team/keras/issues/9394
    model.compile('sgd', 'mse')
    full_feature_array = model.predict(array, verbose=1)

    # Return features
    logging.info('Feature array created successfully.')
    return full_feature_array
1b8796ace6f3a72a288b6621f01e62c90a06a769
29,856
def init_params():
    """Set up the simulation parameters."""
    p = {}
    p['endYear'] = 2040
    p['statsCollectFrom'] = 1990
    p['numberPolicyParameters'] = 4
    p['numberScenarios'] = 9
    p['implementPoliciesFromYear'] = 2020
    p['discountingFactor'] = 0.03
    return p
322731e35a31b5d44351ca743aff14a353f08301
29,857
def get_vector(model, line):
    """
    Get the vector of the article.

    :param model: trained Doc2Vec model
    :param line: tokenized text
    :return: inferred vector
    """
    vec = model.infer_vector(line)
    return vec
740791cd8773beeed4ebbee066dfbe6830694685
29,858