def get_state(module_instance, incremental_state, key_postfix):
    """Helper for extracting incremental state."""
    if incremental_state is None:
        return None
    full_key = _get_full_key(module_instance, key_postfix)
    return incremental_state.get(full_key, None)
b3ba8f10fd26ed8878cb608076873cad52a19841
3,647,650
def get_lenovo_urls(from_date, to_date):
    """
    Extracts the URLs at which data about vulnerabilities are available.

    :param from_date: start of date interval
    :param to_date: end of date interval
    :return: urls
    """
    lenovo_url = config['vendor-cve']['lenovo_url']
    len_p = LenovoMainPageParser(lenovo_url, from_date, to_date)
    len_p.parse()
    return len_p.entities
503f078d9a4b78d60792a2019553f65432c21320
3,647,651
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then applies batch_normalization on batch.

    # Arguments
        x: Input tensor or variable.
        gamma: Tensor by which to scale the input.
        beta: Tensor with which to center the input.
        reduction_axes: iterable of integers, axes over which to normalize.
        epsilon: Fuzz factor.

    # Returns
        A tuple length of 3, `(normalized_tensor, mean, variance)`.
    """
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)
    mean = av.ops.reduce_mean(x, reduction_axes, True)
    variance = av.ops.reduce_mean(av.ops.square(x - mean), reduction_axes, True)
    normalized_tensor = batch_normalization(
        x, mean, variance, beta, gamma, axis=reduction_axes, epsilon=epsilon)
    return normalized_tensor, mean, variance
2c1cc9368438cbd62d48c71da013848068a7664e
3,647,652
def itemAPIEndpoint(categoryid):
    """Return page to display JSON formatted information of item."""
    items = session.query(Item).filter_by(category_id=categoryid).all()
    return jsonify(Items=[i.serialize for i in items])
33abd39d7d7270fe3b040c228d11b0017a8b7f83
3,647,654
def command(settings_module, command, bin_env=None, pythonpath=None,
            *args, **kwargs):
    """
    Run an arbitrary Django management command.
    """
    da = _get_django_admin(bin_env)
    cmd = "{0} {1} --settings={2}".format(da, command, settings_module)
    if pythonpath:
        cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
    for arg in args:
        cmd = "{0} --{1}".format(cmd, arg)
    # dict.iteritems() was Python 2 only; items() works on both.
    for key, value in kwargs.items():
        if not key.startswith("__"):
            cmd = '{0} --{1}={2}'.format(cmd, key, value)
    return __salt__['cmd.run'](cmd)
6f7f4193b95df786d6c1540f4c687dec89cf01a6
3,647,655
def read_input(fpath):
    """
    Read an input file and return the parsed rows and columns.

    Args:
        fpath (str): File path of the file to read.

    Returns:
        tuple: (rows, columns), where rows is a list of tuples of ints,
        one tuple per input line, and columns is the column-wise view
        produced by format_into_columns().
    """
    with open(fpath, 'r') as f:
        data = [line.strip() for line in f.readlines()]
    rows = [tuple(map(int, d.split())) for d in data]
    columns = format_into_columns(data)
    return rows, columns
ceeb418403bef286eda82ba18cd0ac8e4899ea4f
3,647,656
def find_level(key):
    """Find the last 15 bits of a key, corresponding to a level."""
    return key & LEVEL_MASK
30c454220e6dac36c1612b5a1a5abf53a7a2911c
3,647,658
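A quick sketch of the masking in find_level above, assuming LEVEL_MASK is the 15-bit mask 0x7FFF (the docstring implies this, but the constant is defined elsewhere):

LEVEL_MASK = 0x7FFF  # assumed value: low 15 bits

key = (42 << 15) | 7   # some payload in the high bits, level 7 in the low bits
assert find_level(key) == 7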
def _whctrs(anchor):
    """Return width, height, x center, and y center for an anchor (window)."""
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    return w, h, x_ctr, y_ctr
e1a6ff1745aac77e80996bfbb98f42c18af059d7
3,647,659
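A worked example of _whctrs; the inclusive pixel convention (the + 1 and - 1) is why a [0, 0, 15, 15] box, the usual base anchor in Faster R-CNN-style anchor code, is 16 pixels wide with its center at 7.5:

w, h, x_ctr, y_ctr = _whctrs([0, 0, 15, 15])
assert (w, h, x_ctr, y_ctr) == (16, 16, 7.5, 7.5)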
def filter_tiddlers(tiddlers, filters, environ=None):
    """
    Return a generator of tiddlers resulting from filtering the
    provided iterator of tiddlers by the provided filters.

    If filters is a string, it will be parsed for filters.
    """
    if isinstance(filters, basestring):
        filters, _ = parse_for_filters(filters, environ)
    return recursive_filter(filters, tiddlers)
25c86fdcb6f924ce8349d45b999ebe491c4b6299
3,647,660
def apply_move(board_state, move, side):
    """Returns a copy of the given board_state with the desired move applied.

    Args:
        board_state (3x3 tuple of int): The given board_state we want to apply
            the move to.
        move (int, int): The position we want to make the move in.
        side (int): The side we are making this move for, 1 for the first
            player, -1 for the second player.

    Returns:
        (3x3 tuple of int): A copy of the board_state with the given move
        applied for the given side.
    """
    move_x, move_y = move

    def get_tuples():
        for x in range(3):
            if move_x == x:
                temp = list(board_state[x])
                temp[move_y] = side
                yield tuple(temp)
            else:
                yield board_state[x]

    return tuple(get_tuples())
b47da6ddab3bd1abf99ee558471a3696e46b8352
3,647,661
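A short usage sketch for apply_move (not from the original source): starting from an empty 3x3 board, applying a move returns a new tuple and leaves the original untouched:

empty = ((0, 0, 0), (0, 0, 0), (0, 0, 0))
after = apply_move(empty, (1, 1), 1)   # player 1 takes the center
assert after == ((0, 0, 0), (0, 1, 0), (0, 0, 0))
assert empty == ((0, 0, 0), (0, 0, 0), (0, 0, 0))  # original is unchanged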
import copy
from functools import reduce


def merge(dicts, overwrite=False, append=False, list_of_dicts=False):
    """ merge dicts, starting with dicts[1] into dicts[0]

    Parameters
    ----------
    dicts : list[dict]
        list of dictionaries
    overwrite : bool
        if true allow overwriting of current data
    append : bool
        if true and items are both lists, then add them
    list_of_dicts: bool
        treat list of dicts as additional branches

    Examples
    --------
    >>> from pprint import pprint
    >>> d1 = {1:{"a":"A"},2:{"b":"B"}}
    >>> d2 = {1:{"a":"A"},2:{"c":"C"}}
    >>> pprint(merge([d1,d2]))
    {1: {'a': 'A'}, 2: {'b': 'B', 'c': 'C'}}

    >>> d1 = {1:{"a":["A"]}}
    >>> d2 = {1:{"a":["D"]}}
    >>> pprint(merge([d1,d2],append=True))
    {1: {'a': ['A', 'D']}}

    >>> d1 = {1:{"a":"A"},2:{"b":"B"}}
    >>> d2 = {1:{"a":"X"},2:{"c":"C"}}
    >>> merge([d1,d2],overwrite=False)
    Traceback (most recent call last):
    ...
    ValueError: different data already exists at "1.a": old: A, new: X

    >>> merge([{},{}],overwrite=False)
    {}
    >>> merge([{},{'a':1}],overwrite=False)
    {'a': 1}
    >>> pprint(merge([{},{'a':1},{'a':1},{'b':2}]))
    {'a': 1, 'b': 2}

    >>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}]))
    Traceback (most recent call last):
    ...
    ValueError: different data already exists at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]

    >>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}], list_of_dicts=True))
    Traceback (most recent call last):
    ...
    ValueError: list of dicts are of different lengths at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]

    >>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}, {"e": 4}]}], list_of_dicts=True))
    {'a': [{'b': 1, 'd': 3}, {'c': 2, 'e': 4}]}

    """  # noqa: E501
    outdict = copy.deepcopy(dicts[0])

    def single_merge(a, b):
        return _single_merge(a, b, overwrite=overwrite, append=append,
                             list_of_dicts=list_of_dicts)

    reduce(single_merge, [outdict] + dicts[1:])
    return outdict
fdbde1c83f2fbcb74be5c4fb1376af4981655ad7
3,647,662
import re

import pandas as pd


def compute_delivery_period_index(frequency=None,
                                  delivery_begin_dt_local=None,
                                  delivery_end_date_local=None,
                                  tz_local=None,
                                  profile=None,
                                  ):
    """
    Computes the delivery period index of a given contract.

    :param frequency: The type of delivery contract (year, month, etc.)
    :param delivery_begin_dt_local: The beginning datetime of the delivery
    :param delivery_end_date_local: The end date of the delivery
    :param tz_local: The local timezone
    :param profile: The profile of the contract
    :type frequency: string
    :type delivery_begin_dt_local: pd.Timestamp
    :type delivery_end_date_local: pd.Timestamp
    :type tz_local: pytz.tzfile
    :type profile: string
    :return: The delivery period index
    :rtype: int
    """
    if (pd.isnull(delivery_begin_dt_local)
            or frequency == global_var.contract_frequency_unknown
            or frequency == global_var.contract_frequency_spread
            ):
        return global_var.contract_delivery_period_index_unknown
    assert tz_local
    assert delivery_begin_dt_local.tz.zone == (tz_local
                                               if type(tz_local) == str
                                               else tz_local.zone
                                               ), (delivery_begin_dt_local.tz.zone, tz_local)
    if frequency == global_var.contract_frequency_half_hour:
        # Four components (month, day, hour, minute) need four placeholders;
        # the original format string had only three and silently dropped the minute.
        ans = int('{0:0>2}{1:0>2}{2:0>2}{3:0>2}'.format(delivery_begin_dt_local.month,
                                                        delivery_begin_dt_local.day,
                                                        delivery_begin_dt_local.hour,
                                                        delivery_begin_dt_local.minute,
                                                        ))
    elif frequency == global_var.contract_frequency_hour:
        ans = int('{0:0>2}{1:0>2}{2:0>2}'.format(delivery_begin_dt_local.month,
                                                 delivery_begin_dt_local.day,
                                                 delivery_begin_dt_local.hour,
                                                 ))
    elif frequency == global_var.contract_frequency_bloc:
        bloc_match = re.compile(global_var.contract_profile_bloc_pattern).match(profile)
        hour1 = int(bloc_match.group(1))
        hour2 = int(bloc_match.group(2))
        assert hour1 < hour2
        ans = int('{0:0>2}{1:0>2}{2:0>2}{3:0>2}'.format(delivery_begin_dt_local.month,
                                                        delivery_begin_dt_local.day,
                                                        hour1,
                                                        hour2,
                                                        ))
    elif frequency == global_var.contract_frequency_day:
        ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month,
                                          delivery_begin_dt_local.day,
                                          ))
    elif frequency == global_var.contract_frequency_days:
        ans = int('{0:0>2}{1:0>2}{2}'.format(delivery_begin_dt_local.month,
                                             delivery_begin_dt_local.day,
                                             int((delivery_end_date_local
                                                  - delivery_begin_dt_local.replace(hour=0, minute=0)
                                                  ).total_seconds() / (3600 * 24)),
                                             ))
    # These four frequencies all index by month and day; the original
    # repeated the identical branch body four times.
    elif frequency in (global_var.contract_frequency_weekend,
                       global_var.contract_frequency_week,
                       global_var.contract_frequency_bow,
                       global_var.contract_frequency_bom,
                       ):
        ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month,
                                          delivery_begin_dt_local.day,
                                          ))
    elif frequency == global_var.contract_frequency_month:
        ans = delivery_begin_dt_local.month
    elif frequency == global_var.contract_frequency_quarter:
        ans = (delivery_begin_dt_local.month // 3) + 1
    elif frequency == global_var.contract_frequency_season:
        if delivery_begin_dt_local.month == 4:
            ans = global_var.contract_delivery_period_index_summer
        elif delivery_begin_dt_local.month == 10:
            ans = global_var.contract_delivery_period_index_winter
        else:
            raise ValueError(frequency, delivery_begin_dt_local)
    elif frequency == global_var.contract_frequency_year:
        ans = global_var.contract_delivery_period_index_year
    else:
        raise NotImplementedError(frequency, delivery_begin_dt_local)
    return ans
4eb47c857a235a7db31624dc78c83f291f0ba67a
3,647,663
def make_proxy(global_conf, address, allowed_request_methods="",
               suppress_http_headers=""):
    """
    Make a WSGI application that proxies to another address:

    ``address``
        the full URL ending with a trailing ``/``

    ``allowed_request_methods``:
        a space separated list of request methods (e.g., ``GET POST``)

    ``suppress_http_headers``
        a space separated list of http headers (lower case, without
        the leading ``http_``) that should not be passed on to target host
    """
    allowed_request_methods = aslist(allowed_request_methods)
    suppress_http_headers = aslist(suppress_http_headers)
    return Proxy(
        address,
        allowed_request_methods=allowed_request_methods,
        suppress_http_headers=suppress_http_headers)
054bcce2d10db2947d5322283e4e3c87328688cb
3,647,664
import unittest


def test():
    """Runs the unit tests without test coverage."""
    tests = unittest.TestLoader().discover('eachday/tests', pattern='test*.py')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1
3d8fcfada7309e62215fd2b3a1913ed51d5f14f8
3,647,665
async def save_training_result(r: dependency.TrainingResultHttpBody):
    """
    Saves the model training statistics to the database. This method is called
    only by registered dataset microservices.

    :param r: Training Result with updated fields sent by dataset microservice
    :return: {'status': 'success'} if successful update, else http error.
    """
    tr = get_training_result_by_training_id(r.training_id)
    tr.training_accuracy = r.results['training_accuracy']
    tr.validation_accuracy = r.results['validation_accuracy']
    tr.training_loss = r.results['training_loss']
    tr.validation_loss = r.results['validation_loss']
    tr.loss_config = r.results['loss_config']
    tr.optimizer_config = r.results['optimizer_config']
    tr.complete = True
    update_training_result_db(tr)
    return {
        'status': 'success',
        'detail': 'Training data successfully updated.'
    }
60302a3053a8be3781ad8da9e75b05aed85a5b06
3,647,666
def sort2nd(xs):
    """Sorts xs in place by each element's second item and returns it."""
    # list.sort() no longer accepts a comparison function in Python 3;
    # a key function replaces the old cmp2nd comparator.
    xs.sort(key=lambda x: x[1])
    return xs
9b9ccef6794db2cfaa31492eeddb0d6344ff30e5
3,647,667
def is_one_of_type(val, types):
    """Returns whether the given value is one of the given types.

    :param val: The value to evaluate
    :param types: A sequence of types to check against.
    :return: Whether the given value is one of the given types.
    """
    val_type = type(val)
    for tt in types:
        if val_type is tt:
            return True
    return False
4bda5ebc41aa7377a93fdb02ce85c50b9042e2c1
3,647,668
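A brief usage sketch for is_one_of_type (not from the original source); because the comparison uses `type(val) is tt`, subclasses do not match:

assert is_one_of_type(3, (int, float))
assert not is_one_of_type("3", (int, float))
assert not is_one_of_type(True, (int,))  # exact type match: bool is not int here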
def get_online(cheat_id):
    """Get the online user count for a cheat
    ---
    consumes:
      - application/json
    parameters:
      - in: path
        name: cheat_id
        type: string
        description: ObjectId of the cheat as a string
    responses:
      200:
        description: Successful request
      400:
        schema:
          $ref: '#/definitions/Error'
    """
    count = 0
    if cheat_id in online_counter_dict:
        count = len(online_counter_dict[cheat_id])
    # A successful lookup should return 200; the original returned 400
    # even though its docstring documents 200 as the success status.
    return make_response({'online': count}), 200
bcfc0c44a0284ad298f533bdb8afd1e415be13b8
3,647,670
def grpc_client_connection(svc: str = None, target: str = None,
                           session: Session = None) -> Channel:
    """
    Create a new GRPC client connection from a service name, target endpoint
    and session

    @param svc: The name of the service to which we're trying to connect (ex. blue)
    @param target: The endpoint, associated with the service, to which the
        connection should direct its GRPC requests
    @param session: The session to associate with the connection. This object
        will be used to authenticate with the service
    """

    # First, set the session and target to default values if they weren't provided
    session = session if session else Session()
    target = target if target else BLUE_ENDPOINT

    # Next, get the access token from the session and then embed
    # it into credentials we can send to the GRPC service
    token = session.access_token()
    credentials = composite_channel_credentials(
        ssl_channel_credentials(),
        access_token_call_credentials(token))

    # Now, create a secure channel from the target and credentials
    if svc:
        conn = secure_channel(
            target=target,
            credentials=credentials,
            options=(('grpc.enable_http_proxy', 0),),
            interceptors=[
                _header_adder_interceptor("service-name", svc),
                _header_adder_interceptor("x-agent", "blue-sdk-python")])
    else:
        conn = secure_channel(target=target, credentials=credentials)

    # Return the connection
    return conn
fc805b15c1d94bcde5ac4eacfc72d854f860f95f
3,647,671
def aqi(pm25):
    """AQI Calculator

    Calculates AQI from PM2.5 using EPA formula and breakpoints from:
    https://www.airnow.gov/sites/default/files/2018-05/aqi-technical-assistance-document-may2016.pdf

    Args:
        - pm25 (int or float): PM2.5 in ug/m3
    """
    if pm25 < 0:
        raise ValueError("PM2.5 must be positive.")
    # round PM2.5 to nearest tenth for categorization
    pm25 = np.round(pm25, 1)

    green = {"aqi_low": 0, "aqi_hi": 50, "pm_low": 0.0, "pm_hi": 12.0}
    yellow = {"aqi_low": 51, "aqi_hi": 100, "pm_low": 12.1, "pm_hi": 35.4}
    orange = {"aqi_low": 101, "aqi_hi": 150, "pm_low": 35.5, "pm_hi": 55.4}
    red = {"aqi_low": 151, "aqi_hi": 200, "pm_low": 55.5, "pm_hi": 150.4}
    purple = {"aqi_low": 201, "aqi_hi": 300, "pm_low": 150.5, "pm_hi": 250.4}
    maroon = {"aqi_low": 301, "aqi_hi": 500, "pm_low": 250.5, "pm_hi": 500.4}
    colors = [green, yellow, orange, red, purple, maroon]

    categorized = False
    # Assign measurement to AQI category.
    for color in colors:
        if color["pm_low"] <= pm25 <= color["pm_hi"]:
            cat = color
            categorized = True
            break
    # Put in highest category if still not assigned.
    if not categorized:
        cat = colors[-1]

    # EPA formula for AQI.
    aqi_num = (cat["aqi_hi"] - cat["aqi_low"]) / \
              (cat["pm_hi"] - cat["pm_low"]) * \
              (pm25 - cat["pm_low"]) + cat["aqi_low"]
    return aqi_num
199066221d91a527ea3c2f3f67a994eb13b7a708
3,647,672
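A worked instance of the EPA linear interpolation in aqi above: a PM2.5 of 35.0 ug/m3 falls in the yellow band (PM2.5 12.1-35.4 maps onto AQI 51-100), so

# (100 - 51) / (35.4 - 12.1) * (35.0 - 12.1) + 51 ≈ 99.2
print(round(aqi(35.0), 1))  # 99.2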
from django.db import connection


def require_lock(model, lock='ACCESS EXCLUSIVE'):
    """
    Decorator for PostgreSQL's table-level lock functionality

    Example:
        @transaction.commit_on_success
        @require_lock(MyModel, 'ACCESS EXCLUSIVE')
        def myview(request)
            ...

    PostgreSQL's LOCK Documentation:
    http://www.postgresql.org/docs/8.3/interactive/sql-lock.html
    """
    def require_lock_decorator(view_func):
        def wrapper(*args, **kwargs):
            if lock not in LOCK_MODES:
                # The original format string was missing its `% lock` argument.
                raise ValueError('%s is not a PostgreSQL supported lock mode.' % lock)
            cursor = connection.cursor()
            cursor.execute(
                'LOCK TABLE %s IN %s MODE' % (model._meta.db_table, lock)
            )
            return view_func(*args, **kwargs)
        return wrapper
    return require_lock_decorator
1cfa74246ddbde9840f5e519e1481cd8773fb038
3,647,673
def welcome():
    """List all available api routes."""
    # Set the app.route() decorator for the "/api/v1.0/precipitation" route
    return (
        f"Available Routes:<br/>"
        f"/api/v1.0/names<br/>"
        f"/api/v1.0/precipitation"
    )
74d6509fede66bf4243b9e4a4e107391b13aef16
3,647,674
def pbootstrap(data, R, fun, initval=None, ncpus=1):
    """
    :func pbootstrap: Calls the boot method for R iterations in parallel and
        gets estimates of y-intercept and slope
    :param data: data - contains dataset
    :param R: number of iterations
    :param fun: optim - function to get estimate of y-intercept and slope
    :param initval: initial guess of y-intercept and slope can be passed - optional
    :param ncpus: number of physical cores to run the pbootstrap method - optional
    :return: estimates of y-intercept and slope
    """
    N = data.shape[0]
    thetas = Parallel(ncpus)(delayed(boot)(data, N, fun, initval)
                             for _ in range(R))
    return np.asarray(thetas)
f5d1b523969735ef30873f593472e79f8399622c
3,647,675
def _el_orb(string):
    """Parse the element and orbital argument strings.

    The presence of an element without any orbitals means that we want to
    plot all of its orbitals.

    Args:
        string (str): The element and orbitals as a string, in the form
            ``"C.s.p,O"``.

    Returns:
        dict: The elements and orbitals as a :obj:`dict`. For example::

            {'Bi': ['s', 'px', 'py', 'd']}

        If an element symbol is included with an empty list, then all
        orbitals for that species are considered.
    """
    el_orbs = {}
    for split in string.split(','):
        orbs = split.split('.')
        orbs = [orbs[0], 's', 'p', 'd', 'f'] if len(orbs) == 1 else orbs
        el_orbs[orbs.pop(0)] = orbs
    return el_orbs
654d085347913bca2fd2834816b988ea81ab7164
3,647,676
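A quick trace of _el_orb for the docstring's own example input (an element with explicit orbitals, and one without):

assert _el_orb("C.s.p,O") == {'C': ['s', 'p'], 'O': ['s', 'p', 'd', 'f']}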
import numpy


def create_LOFAR_configuration(antfile: str, meta: dict = None) -> Configuration:
    """ Define from the LOFAR configuration file

    :param antfile:
    :param meta:
    :return: Configuration
    """
    antxyz = numpy.genfromtxt(antfile, skip_header=2, usecols=[1, 2, 3],
                              delimiter=",")
    nants = antxyz.shape[0]
    assert antxyz.shape[1] == 3, "Antenna array has wrong shape %s" % antxyz.shape
    anames = numpy.genfromtxt(antfile, dtype='str', skip_header=2, usecols=[0],
                              delimiter=",")
    mounts = numpy.repeat('XY', nants)
    location = EarthLocation(x=[3826923.9] * u.m, y=[460915.1] * u.m,
                             z=[5064643.2] * u.m)
    fc = Configuration(location=location, names=anames, mount=mounts,
                       xyz=antxyz, frame='global', diameter=35.0)
    return fc
0ed07f1cdd0ef193e51cf88d336cbb421f6ea248
3,647,677
def fmla_for_filt(filt):
    """
    Transform a set of column filters from a dictionary like
    ``{'varX': ['lvl1', 'lvl2'], ...}`` into an R selector expression like
    ``'varX %in% c("lvl1","lvl2") & ...'``.
    """
    return ' & '.join([
        '{var} %in% c({lvls})'.format(
            var=k,
            lvls=','.join(map(lambda x: '"%s"' % x, v))
            if type(v) == list else '"%s"' % v
        )
        for k, v in filt.items()
    ])
149d23822a408ad0d96d7cefd393b489b4b7ecfa
3,647,678
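A usage sketch for fmla_for_filt showing both the list and scalar value paths (not from the original source; assumes Python 3.7+ dict ordering for the output order):

filt = {'species': ['cat', 'dog'], 'sex': 'F'}
print(fmla_for_filt(filt))
# species %in% c("cat","dog") & sex %in% c("F")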
def sfen_board(ban):
    """Convert ban (nrow*nrow array) to sfen string"""
    s = ''
    num = 0
    for iy in range(nrow):
        for ix in range(nrow):
            i = iy * nrow + ix
            if ban[i]:
                if num:
                    s += str(num)
                    num = 0
                s += ban[i]
            else:
                num += 1
        # Flush the run of empty squares at the end of every row; the
        # original only did this for rows before the last (hardcoded
        # `iy < 8`), dropping a trailing empty-square count on the final row.
        if num:
            s += str(num)
            num = 0
        if iy < nrow - 1:
            s += '/'
    return s
55bf08c39457278ff8aaca35f1dd5f3fd6955590
3,647,679
import time


def join_simple_tables(G_df_dict, G_data_info, G_hist, is_train, remain_time):
    """Build G_df_dict['BIG'] by joining the simple (1-1) tables onto the main table."""
    start = time.time()
    if is_train:
        if 'relations' in G_data_info:
            G_hist['join_simple_tables'] = [
                x for x in G_data_info['relations']
                if x['type'] == '1-1' and x['related_to_main_table'] == 'true'
            ]
        else:
            G_hist['join_simple_tables'] = []

    time_budget = G_data_info['time_budget']
    Id = G_data_info['target_id']
    target = G_data_info['target_label']
    main_table_name = G_data_info['target_entity']

    log('[+] join simple tables')
    G_df_dict['BIG'] = G_df_dict[main_table_name]
    # For time-series data, sort the BIG table by the target time column.
    # Note: sort_values returns a new frame, so the result must be assigned
    # back; the original call discarded it.
    if G_data_info['target_time'] != '':
        G_df_dict['BIG'] = G_df_dict['BIG'].sort_values(by=G_data_info['target_time'])

    for relation in G_hist['join_simple_tables']:
        left_table_name = relation['left_entity']
        right_table_name = relation['right_entity']
        left_on = relation['left_on']
        right_on = relation['right_on']
        if main_table_name == left_table_name:
            merge_table_name = right_table_name
            skip_name = right_on
        else:
            merge_table_name = left_table_name
            left_on, right_on = right_on, left_on
            skip_name = left_on
        log(merge_table_name)
        # Prefix the merged table's columns (except the join keys) to avoid clashes.
        merge_table = G_df_dict[merge_table_name].copy()
        merge_table.columns = [x if x in skip_name else merge_table_name + "_" + x
                               for x in merge_table.columns]
        G_df_dict['BIG'] = G_df_dict['BIG'].merge(merge_table, left_on=left_on,
                                                  right_on=right_on, how='left')
        log(f"G_df_dict['BIG'].shape: {G_df_dict['BIG'].shape}")

    end = time.time()
    remain_time -= (end - start)
    log("remain_time: {} s".format(remain_time))
    return remain_time
ba625cee3d4ede6939b8e12ce734f85325044349
3,647,680
def create_epochs(data, events_onsets, sampling_rate=1000, duration=1, onset=0,
                  index=None):
    """
    Epoching a dataframe.

    Parameters
    ----------
    data : pandas.DataFrame
        Data*time.
    events_onsets : list
        A list of event onsets indices.
    sampling_rate : int
        Sampling rate (samples/second).
    duration : int or list
        Duration(s) of each epoch(s) (in seconds).
    onset : int
        Epoch onset(s) relative to events_onsets (in seconds).
    index : list
        Events names in order that will be used as index. Must contain unique
        names. If not provided, will be replaced by event number.

    Returns
    ----------
    epochs : dict
        dict containing all epochs.

    Example
    ----------
    >>> import neurokit as nk
    >>> epochs = nk.create_epochs(data, events_onsets)

    Notes
    ----------
    *Authors*

    - Dominique Makowski (https://github.com/DominiqueMakowski)

    *Dependencies*

    - numpy
    """
    # Convert ints to arrays if needed
    if isinstance(duration, list) or isinstance(duration, np.ndarray):
        duration = np.array(duration)
    else:
        duration = np.array([duration] * len(events_onsets))
    if isinstance(onset, list) or isinstance(onset, np.ndarray):
        onset = np.array(onset)
    else:
        onset = np.array([onset] * len(events_onsets))

    if isinstance(data, list) or isinstance(data, np.ndarray) or isinstance(data, pd.Series):
        data = pd.DataFrame({"Signal": list(data)})

    # Store durations in seconds before converting to timepoints
    duration_in_s = duration.copy()
    onset_in_s = onset.copy()

    # Convert to timepoints
    duration = duration * sampling_rate
    onset = onset * sampling_rate

    # Create the index
    if index is None:
        index = list(range(len(events_onsets)))
    else:
        if len(list(set(index))) != len(index):
            print("NeuroKit Warning: create_epochs(): events_names does not "
                  "contain unique names, replacing them by numbers.")
            index = list(range(len(events_onsets)))
        else:
            index = list(index)

    # Create epochs
    epochs = {}
    for event, event_onset in enumerate(events_onsets):
        epoch_onset = int(event_onset + onset[event])
        epoch_end = int(event_onset + duration[event] + 1)
        epoch = data[epoch_onset:epoch_end].copy()
        epoch.index = np.linspace(start=onset_in_s[event],
                                  stop=duration_in_s[event],
                                  num=len(epoch), endpoint=True)
        relative_time = np.linspace(start=onset[event], stop=duration[event],
                                    num=len(epoch),
                                    endpoint=True).astype(int).tolist()
        absolute_time = np.linspace(start=epoch_onset, stop=epoch_end,
                                    num=len(epoch),
                                    endpoint=True).astype(int).tolist()
        epoch["Epoch_Relative_Time"] = relative_time
        epoch["Epoch_Absolute_Time"] = absolute_time
        epochs[index[event]] = epoch
    return epochs
d173b04d5e5835509a41b3ac2288d0d01ff54784
3,647,681
from typing import Type


def is_equal_limit_site(
    site: SiteToUse, limit_site: SiteToUse, site_class: Type[Site]
) -> bool:
    """Check if site is a limit site."""
    # The original annotated the return type as None even though the
    # function returns a boolean.
    if site_class == Site:
        return site.point.x == limit_site.x and site.point.y == limit_site.y
    elif site_class == WeightedSite:
        return (
            site.point.x == limit_site[0].x
            and site.point.y == limit_site[0].y
            and site.weight == limit_site[1]
        )
1ebe8b18749bb42cf1e55e89a1e861b687f8881b
3,647,682
def get_header(filename):
    """retrieves the header of an image

    Args:
        filename (str): file name

    Returns:
        (str): header
    """
    im = fabio.open(filename)
    return im.header
a3c195d23b671179bc765c081c0a1e6b9119a71d
3,647,683
def gaussian_ll_pdf(x, mu, sigma):
    """Evaluates the (unnormalized) log of the normal PDF at point x

    Parameters
    ----------
    x : float or array-like
        point at which to evaluate the log pdf
    mu : float or array-like
        mean of the normal on a linear scale
    sigma : float or array-like
        standard deviation of the normal on a linear scale
    """
    log_pdf = -0.5 * (x - mu)**2.0 / sigma**2.0  # - np.log(sigma) - 0.5*np.log(2.0*np.pi)
    return log_pdf
dbf1e389ad8349093c6262b2c595a2e511f2cb28
3,647,684
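A small check of what "unnormalized" means in gaussian_ll_pdf: at x = mu it returns 0 rather than the log of the density peak, because the commented-out -log(sigma) - 0.5*log(2*pi) terms are dropped:

assert gaussian_ll_pdf(0.0, 0.0, 2.0) == 0.0   # peak maps to 0
assert gaussian_ll_pdf(2.0, 0.0, 2.0) == -0.5  # one sigma away: -0.5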
def _show_traceback(method):
    """decorator for showing tracebacks in IPython"""
    def m(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except Exception as e:
            ip = get_ipython()
            if ip is None:
                self.log.warn("Exception in widget method %s: %s",
                              method, e, exc_info=True)
            else:
                ip.showtraceback()
    return m
28909d57247d68200adf1e658ed4d3f7c36f0221
3,647,685
def ecdf(data):
    """Compute ECDF for a one-dimensional array of measurements."""
    # Number of data points
    n = len(data)
    # x-data for the ECDF
    x = np.sort(data)
    # y-data for the ECDF
    y = np.arange(1, len(x) + 1) / n
    return x, y
e0271f87e2c031a55c84de94dbfed34ec34d34f1
3,647,686
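A tiny worked example for ecdf (assumes numpy imported as np, as in the function body): for n = 4 points the ECDF steps by 1/4 at each sorted value:

x, y = ecdf(np.array([3.0, 1.0, 4.0, 2.0]))
# x -> [1. 2. 3. 4.], y -> [0.25 0.5 0.75 1.]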
def import_module_part(request, pk):
    """Module part import. Use an .xlsx file to submit grades to a module part

    On GET the user is presented with a file upload form. On POST, the
    submitted .xlsx file is processed by the system, registering a Grade
    object for each grade in the excel file. It dynamically detects the tests
    that are submitted (by exact name match or database ID), and omits extra
    columns silently. Also, lines that do not have a filled in student number
    are ignored. Students that are not declared as part of the module
    (def:import_student_to_module) raise an import error.

    :param request: Django request
    :param pk: Module part that grades should be submitted to
    :return: A redirect to the Grades course view on success. Otherwise a 404
        (module does not exist), 403 (no permissions) or 400 (bad excel file
        or other import error)
    """
    module_part = get_object_or_404(ModulePart, pk=pk)
    module_edition = get_object_or_404(ModuleEdition, modulepart=module_part)
    person = Person.objects.filter(user=request.user).filter(
        Q(coordinator__module_edition__modulepart=module_part) |
        Q(teacher__module_part=module_part)
    ).first()

    if not ModuleEdition.objects.filter(modulepart=module_part):
        raise Http404('Module does not exist.')
    if not (is_coordinator_or_assistant_of_module(person, module_edition) or
            is_coordinator_or_teacher_of_module_part(person, module_part)):
        raise PermissionDenied('You are not allowed to do this.')

    if request.method == "POST":
        form = GradeUploadForm(request.POST, request.FILES)
        if form.is_valid():
            title_row = form.cleaned_data.get('title_row') - 1

            # Check if /any/ tests and/or grades are imported.
            any_tests = False
            # List of all tests that are imported.
            all_tests = []

            sheet = request.FILES['file'].get_book_dict()
            for table in sheet:
                # Check if the sheet has enough rows
                if title_row >= len(sheet[table]):
                    return bad_request(request, {
                        'message': 'The file that was uploaded was not recognised as a grade'
                                   ' excel file. Are you sure the file is an .xlsx file, and'
                                   ' that all fields are present? Otherwise, download a new'
                                   ' gradesheet and try using that instead.'})

                test_rows = dict()
                university_number_field = None

                # Detect university_number and test columns
                for title_index in range(0, len(sheet[table][title_row])):
                    # This is the university number column
                    if ('number' in str(sheet[table][title_row][title_index]).lower()) or \
                            ('nummer' in str(sheet[table][title_row][title_index]).lower()):
                        university_number_field = title_index
                    else:
                        # Attempt to find a Test
                        # search by ID
                        try:
                            test = Test.objects.filter(pk=sheet[table][title_row][title_index])
                            if test and test.filter(module_part=module_part):
                                test_rows[title_index] = sheet[table][title_row][title_index]  # pk of Test
                                any_tests = True
                        except (ValueError, TypeError):
                            pass  # Not an int.
                        # search by name
                        if Test.objects.filter(module_part=module_part).filter(
                                name=sheet[table][title_row][title_index]):
                            test_rows[title_index] = Test.objects.filter(
                                name=sheet[table][title_row][title_index]
                            ).filter(module_part=module_part)[0].pk  # pk of Test
                            any_tests = True
                        # Attempt to ignore test altogether.
                        else:
                            pass

                if university_number_field is None:
                    continue  # Ignore this sheet
                if len(test_rows.keys()) == 0:
                    continue  # Ignore this sheet

                # The current user's Person is the corrector of the grades.
                teacher = Person.objects.filter(user=request.user).first()

                grades = []

                # Retrieve Test objects beforehand to validate permissions on
                # tests and speed up Grade creation.
                tests = dict()
                for test_column in test_rows.keys():
                    tests[test_column] = Test.objects.get(pk=test_rows[test_column])
                all_tests.extend(test for test in tests.values() if test)

                # Check excel file for invalid students
                invalid_students = []
                for row in sheet[table][(title_row + 1):]:
                    if not Studying.objects.filter(
                            person__university_number__contains=row[university_number_field]).filter(
                            module_edition=module_edition):
                        invalid_students.append(row[university_number_field])

                # Check for invalid student numbers in the university_number
                # column, but ignore empty fields. (The original used
                # `student is not ''`, an identity check; `!=` is correct.)
                if [student for student in invalid_students if student != '']:
                    return bad_request(request, {
                        'message': 'Students {} are not enrolled in this module.\n '
                                   'Enroll these students first before retrying'
                                   .format(invalid_students)})

                # Make Grades
                for row in sheet[table][(title_row + 1):]:  # Walk horizontally over table
                    student = Person.objects.filter(
                        university_number__contains=row[university_number_field]).first()
                    # check if this is not an empty line, else continue.
                    if student:
                        for test_column in test_rows.keys():
                            try:
                                grades.append(make_grade(
                                    student=student,
                                    corrector=teacher,
                                    test=tests[test_column],
                                    grade=row[test_column]
                                ))
                            except GradeException as e:  # Called for either: bad grade, grade out of bounds
                                return bad_request(request, {'message': e})
                save_grades(grades)  # Bulk-save grades. Also prevents a partial import of the sheet.

            # Check if anything was imported.
            if not any_tests:
                return bad_request(request, {'message': 'There were no tests recognized to import.'})

            return render(request=request,
                          template_name='importer/successfully_imported.html',
                          context={'tests': all_tests})
        else:
            return bad_request(request, {
                'message': 'The file uploaded was not recognised as a grade excel file.'
                           ' Are you sure the file is an .xlsx file? Otherwise, download a'
                           ' new gradesheet and try using that instead'})
    else:  # GET request
        form = GradeUploadForm()
        return render(request, 'importer/importmodulepart.html',
                      {'form': form, 'pk': pk, 'module_part': module_part})
a915b426b8c870ee62a154a1370080e87a7de42f
3,647,687
from typing import Tuple


def ordered_pair(x: complex) -> Tuple[float, float]:
    """
    Returns the tuple (a, b), like the ordered pair in the complex plane
    """
    return (x.real, x.imag)
c67e43cf80194f7a5c7c5fd20f2e52464816d056
3,647,688
from FuXi.Rete.RuleStore import SetupRuleStore


def HornFromDL(owlGraph, safety=DATALOG_SAFETY_NONE, derivedPreds=[],
               complSkip=[]):
    """
    Takes an OWL RDF graph, an indication of what level of ruleset safety
    (see: http://code.google.com/p/fuxi/wiki/FuXiUserManual#Rule_Safety) to
    apply, and a list of derived predicates and returns a Ruleset instance
    comprised of the rules extracted from the OWL RDF graph (using a
    variation of the OWL 2 RL transformation)
    """
    ruleStore, ruleGraph, network = SetupRuleStore(makeNetwork=True)
    return network.setupDescriptionLogicProgramming(
        owlGraph,
        derivedPreds=derivedPreds,
        expanded=complSkip,
        addPDSemantics=False,
        constructNetwork=False,
        safety=safety)
37dfe479dd0f150956261197b47cfbd468285f92
3,647,690
def _assembleMatrix(data, indices, indptr, shape):
    """
    Generic assemble matrix function to create a CSR matrix

    Parameters
    ----------
    data : array
        Data values for matrix
    indices : int array
        CSR type indices
    indptr : int array
        Row pointer
    shape : tuple-like
        Actual shape of matrix

    Returns
    -------
    M : scipy csr sparse matrix
        The assembled matrix
    """
    M = sparse.csr_matrix((data, indices, indptr), shape)
    return M
6ada37b14270b314bcc6ba1ef55da10c07619731
3,647,691
def mock_state_store(decoy: Decoy) -> StateStore:
    """Get a mocked out StateStore."""
    return decoy.mock(cls=StateStore)
db8e9e99dcd4bbc37094b09febb63c849550bc81
3,647,692
from typing import Callable
from typing import List


def beam_search_runner_range(output_series: str,
                             decoder: BeamSearchDecoder,
                             max_rank: int = None,
                             postprocess: Callable[[List[str]], List[str]] = None
                             ) -> List[BeamSearchRunner]:
    """Return beam search runners for a range of ranks from 1 to max_rank.

    This means there is max_rank output series where the n-th series contains
    the n-th best hypothesis from the beam search.

    Args:
        output_series: Prefix of output series.
        decoder: Beam search decoder shared by all runners.
        max_rank: Maximum rank of the hypotheses.
        postprocess: Series-level postprocess applied on output.

    Returns:
        List of beam search runners getting hypotheses with rank from 1 to
        max_rank.
    """
    check_argument_types()
    if max_rank is None:
        max_rank = decoder.beam_size
    if max_rank > decoder.beam_size:
        raise ValueError(
            ("The maximum rank ({}) cannot be "
             "bigger than beam size {}.").format(max_rank, decoder.beam_size))
    return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
                             decoder, r, postprocess)
            for r in range(1, max_rank + 1)]
01c63368219f4e1c95a7557df585893d68134478
3,647,693
def read_variants(
        pipeline,  # type: beam.Pipeline
        all_patterns,  # type: List[str]
        pipeline_mode,  # type: PipelineModes
        allow_malformed_records,  # type: bool
        representative_header_lines=None,  # type: List[str]
        pre_infer_headers=False,  # type: bool
        sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,  # type: int
        use_1_based_coordinate=False  # type: bool
):
    # type: (...) -> pvalue.PCollection
    """Returns a PCollection of Variants by reading VCFs."""
    compression_type = get_compression_type(all_patterns)
    if compression_type == filesystem.CompressionTypes.GZIP:
        splittable_bgzf = _get_splittable_bgzf(all_patterns)
        if splittable_bgzf:
            return (pipeline
                    | 'ReadVariants'
                    >> vcfio.ReadFromBGZF(splittable_bgzf,
                                          representative_header_lines,
                                          allow_malformed_records,
                                          pre_infer_headers,
                                          sample_name_encoding,
                                          use_1_based_coordinate))

    if pipeline_mode == PipelineModes.LARGE:
        variants = (pipeline
                    | 'InputFilePattern' >> beam.Create(all_patterns)
                    | 'ReadAllFromVcf' >> vcfio.ReadAllFromVcf(
                        representative_header_lines=representative_header_lines,
                        compression_type=compression_type,
                        allow_malformed_records=allow_malformed_records,
                        pre_infer_headers=pre_infer_headers,
                        sample_name_encoding=sample_name_encoding,
                        use_1_based_coordinate=use_1_based_coordinate))
    else:
        variants = pipeline | 'ReadFromVcf' >> vcfio.ReadFromVcf(
            all_patterns[0],
            representative_header_lines=representative_header_lines,
            compression_type=compression_type,
            allow_malformed_records=allow_malformed_records,
            pre_infer_headers=pre_infer_headers,
            sample_name_encoding=sample_name_encoding,
            use_1_based_coordinate=use_1_based_coordinate)
    if compression_type == filesystem.CompressionTypes.GZIP:
        variants |= 'FusionBreak' >> fusion_break.FusionBreak()
    return variants
5f706219ccc5a5f59980122b4fdac93e35056f5d
3,647,694
import numpy


def carla_location_to_numpy_vector(carla_location):
    """
    Convert a carla location to an icv vector3

    Considers the conversion from left-handed system (unreal) to right-handed
    system (icv)

    :param carla_location: the carla location
    :type carla_location: carla.Location
    :return: a numpy.array with 3 elements
    :rtype: numpy.array
    """
    return numpy.array([
        carla_location.x,
        -carla_location.y,
        carla_location.z
    ])
a207ec5d878a07e62f96f21cd33c980cb1e5dacc
3,647,695
def prev_cur_next(lst):
    """
    Returns list of tuples (prev, cur, next) for each item in list, where
    "prev" and "next" are the previous and next items in the list,
    respectively, or None if they do not exist.
    """
    # The original did `zip(...) + [(lst[-2], lst[-1], None)]`, which fails on
    # Python 3 (zip returns an iterator) and on lists shorter than two items.
    # Padding both shifted lists with None produces the same result.
    return list(zip([None] + lst[:-1], lst, lst[1:] + [None]))
c00cd27e1eaeffd335a44ac625cb740f126a06e5
3,647,696
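A quick usage check for prev_cur_next:

assert prev_cur_next([1, 2, 3]) == [(None, 1, 2), (1, 2, 3), (2, 3, None)]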
import pathlib


def vet_input_path(filename):
    """
    Check if the given input file exists.

    Returns a pathlib.Path object if everything is OK, raises
    InputFileException if not.
    """
    putative_path = pathlib.Path(filename)
    if putative_path.exists():
        if not putative_path.is_file():
            msg = ('A given input file is not in fact a file. '
                   'You input {}.'.format(putative_path))
            raise InputFileException(msg)
    else:
        msg = ('Could not find a specified input file. You input {}.'.format(
            putative_path))
        raise InputFileException(msg)
    return putative_path
9c517cf9e56781b995d7109ea0983171760cf58c
3,647,697
import requests


def check_for_updates(repo: str = REPO) -> str:
    """
    Check for updates to the current version.
    """
    message = ""
    url = f"https://api.github.com/repos/{repo}/releases/latest"
    response = requests.get(url)
    if response.status_code != 200:
        raise RuntimeError(
            f"Failed to get commit count. Status code: {response.status_code}"
        )
    data = response.json()

    latest_version = data["name"]  # returns "vx.x.x"
    current_version = f"v{_version.__version__}"  # returns "vx.x.x"

    if latest_version != current_version:
        message = f"New version available: {latest_version}.\n\n"
    else:
        message = "No updates available.\n\n"

    master = get_status(current_version, "master")
    dev = get_status(current_version, "dev")

    for branch in ["master", "dev"]:
        name = branch.capitalize()
        if branch == "master":
            status, ahead_by, behind_by = master
        else:
            status, ahead_by, behind_by = dev

        if status == "behind":
            message += f"{name} is {status} by {behind_by} commits.\n"
        elif status == "ahead":
            message += f"{name} is {status} by {ahead_by} commits.\n"
        else:
            message += f"{name} is up to date.\n"
    return message
0d3b37a74e252552f1e912a0d7072b60a34de86d
3,647,698
def _process_image(record, training):
    """Decodes the image and performs data augmentation if training."""
    image = tf.io.decode_raw(record, tf.uint8)
    image = tf.cast(image, tf.float32)
    image = tf.reshape(image, [32, 32, 3])
    image = image * (1. / 255) - 0.5
    if training:
        padding = 4
        image = tf.image.resize_with_crop_or_pad(image, 32 + padding,
                                                 32 + padding)
        image = tf.image.random_crop(image, [32, 32, 3])
        image = tf.image.random_flip_left_right(image)
    return image
0a255d954c7ca537f10be6ac5c077fd99aaf72cd
3,647,699
def svn_diff_fns2_invoke_token_discard(*args):
    """svn_diff_fns2_invoke_token_discard(svn_diff_fns2_t _obj, void diff_baton, void token)"""
    return _diff.svn_diff_fns2_invoke_token_discard(*args)
cdfd25973cf87190a6f82b07c82e741233c65fcd
3,647,700
import cv2
import numpy as np
import torch
from torchvision.transforms import Compose, Normalize, ToTensor


def process_bb(model, I, bounding_boxes, image_size=(412, 412)):
    """
    :param model: A binary model to classify the bounding boxes
    :param I: PIL image
    :param bounding_boxes: Bounding boxes containing regions of interest
    :param image_size: Choose the size of the patches
    :return: Patches with the class of the ROIs
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    patches = []
    normalization = Compose([ToTensor(),
                             Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    for (x, y, w, h) in bounding_boxes:
        patch = np.array(I.crop((x, y, x + w, y + h)))
        patch = cv2.resize(patch, image_size)
        patch = normalization(patch).unsqueeze(0).to(device)
        patch = model(patch).cpu().detach().numpy()
        patches.append(patch)
    # The original called np.concatenate(patches, patch), which passes the
    # new patch as the axis argument; collecting into a list and
    # concatenating once is correct (and faster).
    return np.concatenate(patches) if patches else np.array([])
2029fa67fb85ce3e15913ce9c1684cbe762ea3b7
3,647,701
def roc(
        observations,
        forecasts,
        bin_edges="continuous",
        dim=None,
        drop_intermediate=False,
        return_results="area",
):
    """Computes the relative operating characteristic for a range of thresholds.

    Parameters
    ----------
    observations : xarray.Dataset or xarray.DataArray
        Labeled array(s) over which to apply the function.
        If ``bin_edges=='continuous'``, observations are binary.
    forecasts : xarray.Dataset or xarray.DataArray
        Labeled array(s) over which to apply the function.
        If ``bin_edges=='continuous'``, forecasts are probabilities.
    bin_edges : array_like, str, default='continuous'
        Bin edges for categorising observations and forecasts. Similar to
        np.histogram, all but the last (righthand-most) bin include the left
        edge and exclude the right edge. The last bin includes both edges.
        ``bin_edges`` will be sorted in ascending order. If
        ``bin_edges=='continuous'``, calculate ``bin_edges`` from forecasts,
        equal to ``sklearn.metrics.roc_curve(f_boolean, o_prob)``.
    dim : str, list
        The dimension(s) over which to compute the contingency table.
    drop_intermediate : bool, default=False
        Whether to drop some suboptimal thresholds which would not appear on
        a plotted ROC curve. This is useful in order to create lighter ROC
        curves. Defaults to ``True`` in ``sklearn.metrics.roc_curve``.
    return_results : str, default='area'
        Specify how the return is structured:

        - 'area': return only the ``area under curve`` of ROC
        - 'all_as_tuple': return ``true positive rate`` and
          ``false positive rate`` at each bin and area under the curve of
          ROC as tuple
        - 'all_as_metric_dim': return ``true positive rate`` and
          ``false positive rate`` at each bin and ``area under curve`` of
          ROC concatenated into a new ``metric`` dimension

    Returns
    -------
    xarray.Dataset or xarray.DataArray :
        reduced by dimensions ``dim``, see ``return_results`` parameter.
        ``true positive rate`` and ``false positive rate`` contain
        ``probability_bin`` dimension with ascending ``bin_edges`` as
        coordinates.

    Examples
    --------
    >>> f = xr.DataArray(np.random.normal(size=(1000)),
    ...                  coords=[('time', np.arange(1000))])
    >>> o = f.copy()
    >>> category_edges = np.linspace(-2, 2, 5)
    >>> xs.roc(o, f, category_edges, dim=['time'])
    <xarray.DataArray 'histogram_observations_forecasts' ()>
    array(1.)

    See also
    --------
    xskillscore.Contingency
    sklearn.metrics.roc_curve

    References
    ----------
    http://www.cawcr.gov.au/projects/verification/
    """
    if dim is None:
        dim = list(forecasts.dims)
    if isinstance(dim, str):
        dim = [dim]

    continuous = False
    if isinstance(bin_edges, str):
        if bin_edges == "continuous":
            continuous = True
            # check that observations are binary
            if isinstance(observations, xr.Dataset):
                o_check = observations.to_array()
            else:
                o_check = observations
            if str(o_check.dtype) != "bool":
                if not ((o_check == 0) | (o_check == 1)).all():
                    raise ValueError(
                        'Input "observations" must represent logical (True/False) outcomes',
                        o_check,
                    )
            # works only for a one-variable Dataset
            if isinstance(forecasts, xr.Dataset):
                varlist = list(forecasts.data_vars)
                if len(varlist) == 1:
                    v = varlist[0]
                else:
                    raise ValueError(
                        "Only works for `xr.Dataset` with one variable, found"
                        f"{forecasts.data_vars}. Consider looping over `data_vars`"
                        "or `.to_array()`."
                    )
                f_bin = forecasts[v]
            else:
                f_bin = forecasts
            f_bin = f_bin.stack(ndim=forecasts.dims)
            f_bin = f_bin.sortby(-f_bin)
            bin_edges = np.append(f_bin[0] + 1, f_bin)
            bin_edges = np.unique(bin_edges)  # ensure ascending order
        else:
            raise ValueError("If bin_edges is str, it can only be continuous.")
    else:
        bin_edges = np.sort(bin_edges)  # ensure ascending order

    # Loop over each bin_edge and get the true positive rate and false
    # positive rate from the contingency table.
    tpr, fpr = [], []
    for i in bin_edges:
        dichotomous_category_edges = np.array(
            [-np.inf, i, np.inf]
        )  # "dichotomous" means two-category
        dichotomous_contingency = Contingency(
            observations,
            forecasts,
            dichotomous_category_edges,
            dichotomous_category_edges,
            dim=dim,
        )
        fpr.append(dichotomous_contingency.false_alarm_rate())
        tpr.append(dichotomous_contingency.hit_rate())
    tpr = xr.concat(tpr, "probability_bin")
    fpr = xr.concat(fpr, "probability_bin")
    tpr["probability_bin"] = bin_edges
    fpr["probability_bin"] = bin_edges

    fpr = fpr.fillna(1.0)
    tpr = tpr.fillna(0.0)

    # pad (0, 0) and (1, 1)
    fpr_pad = xr.concat(
        [
            xr.ones_like(fpr.isel(probability_bin=0, drop=False)),
            fpr,
            xr.zeros_like(fpr.isel(probability_bin=-1, drop=False)),
        ],
        "probability_bin",
    )
    tpr_pad = xr.concat(
        [
            xr.ones_like(tpr.isel(probability_bin=0, drop=False)),
            tpr,
            xr.zeros_like(tpr.isel(probability_bin=-1, drop=False)),
        ],
        "probability_bin",
    )

    if drop_intermediate and fpr.probability_bin.size > 2:
        fpr, tpr = _drop_intermediate(fpr, tpr)
        fpr_pad, tpr_pad = _drop_intermediate(fpr_pad, tpr_pad)

    area = _auc(fpr_pad, tpr_pad)

    if continuous:
        # sklearn returns in reversed order
        fpr = fpr.sortby(-fpr.probability_bin)
        tpr = tpr.sortby(-fpr.probability_bin)

    # mask entries that are always NaN
    def _keep_masked(new, ori, dim):
        """Keep mask from `ori` deprived of dimensions from `dim` in input `new`."""
        isel_dim = {d: 0 for d in forecasts.dims if d in dim}
        mask = ori.isel(isel_dim, drop=True)
        return new.where(mask.notnull())

    fpr = _keep_masked(fpr, forecasts, dim=dim)
    tpr = _keep_masked(tpr, forecasts, dim=dim)
    area = _keep_masked(area, forecasts, dim=dim)

    if return_results == "area":
        return area
    elif return_results == "all_as_metric_dim":
        results = xr.concat([fpr, tpr, area], "metric", coords="minimal")
        results["metric"] = [
            "false positive rate",
            "true positive rate",
            "area under curve",
        ]
        return results
    elif return_results == "all_as_tuple":
        return fpr, tpr, area
    else:
        raise NotImplementedError(
            "expect `return_results` from [all_as_tuple, area, all_as_metric_dim], "
            f"found {return_results}"
        )
328e00060c758ddf3c12cecdec1961561bb2d3f3
3,647,702
# The dataset prepended `from typing import Literal` here, but the code below
# uses pyparsing's Literal/Regex/OneOrMore (typing.Literal is not callable
# and does not support `|` composition like this).
from pyparsing import Literal, OneOrMore, Regex


def make_grammar():
    """Creates the grammar to be used by a spec matcher."""
    # This is apparently how pyparsing recommends to be used,
    # as http://pyparsing.wikispaces.com/share/view/644825 states that
    # it is not thread-safe to use a parser across threads.
    unary_ops = (
        # Order matters here (so that '=' doesn't match before '==')
        Literal("==") | Literal("=") |
        Literal("!=") | Literal("<in>") |
        Literal(">=") | Literal("<=") |
        Literal(">") | Literal("<") |
        Literal("s==") | Literal("s!=") |
        # Order matters here (so that '<' doesn't match before '<=')
        Literal("s<=") | Literal("s<") |
        # Order matters here (so that '>' doesn't match before '>=')
        Literal("s>=") | Literal("s>"))

    all_in_nary_op = Literal("<all-in>")
    or_ = Literal("<or>")

    # An atom is anything not a keyword followed by anything but whitespace
    atom = ~(unary_ops | all_in_nary_op | or_) + Regex(r"\S+")

    unary = unary_ops + atom
    nary = all_in_nary_op + OneOrMore(atom)
    disjunction = OneOrMore(or_ + atom)

    # Even-numbered tokens will be '<or>', so we drop them
    disjunction.setParseAction(lambda _s, _l, t: ["<or>"] + t[1::2])

    expr = disjunction | nary | unary | atom
    return expr
aef2a3fc897c42e61ebd81c9d43cb42f342b1fb6
3,647,703
def _pull(keys):
    """helper method for implementing `client.pull` via `client.apply`"""
    if isinstance(keys, (list, tuple, set)):
        return [eval(key, globals()) for key in keys]
    else:
        return eval(keys, globals())
779fcec45c3693bdd8316c14138a88c57f0c318c
3,647,704
def position(df):
    """
    Compute the daily position from the trading signals.

    :param df:
    :return:
    """
    # Derive the actual daily stock position from the signal: a signal can
    # only be acted on the following day, hence the shift(1).
    df['pos'] = df['signal'].shift(1)
    df['pos'].fillna(method='ffill', inplace=True)

    # Account for limit-up/limit-down days on which the stock cannot be traded.
    # Find days that open limit-up: today's open is more than 9.7% above
    # yesterday's close.
    cond_cannot_buy = df['开盘价'] > df['收盘价'].shift(1) * 1.097
    # On limit-up opens where the target position is 1 (we would have to buy),
    # blank out 'pos': the buy cannot be executed.
    df.loc[cond_cannot_buy & (df['pos'] == 1), 'pos'] = None

    # Find days that open limit-down: today's open is more than 9.7% below
    # yesterday's close.
    cond_cannot_buy = df['开盘价'] < df['收盘价'].shift(1) * 0.903
    # On limit-down opens where the target position is 0 (we would have to
    # sell), blank out 'pos': the sell cannot be executed.
    df.loc[cond_cannot_buy & (df['pos'] == 0), 'pos'] = None

    # On days where 'pos' is blank no trade is possible, so the position
    # carries over from the previous trading day.
    df['pos'].fillna(method='ffill', inplace=True)
    # Any remaining blanks (at the start of the series) become flat (0).
    df['pos'].fillna(value=0, inplace=True)
    return df
15666e26cf8a9d6ae98ff1746aecab759de9139b
3,647,705
def prepare_data(data, preprocessed_data, args):
    """Prepare Data"""
    data = data.to_numpy()
    train_size = int(len(data) * args.train_split)
    test_size = len(data) - train_size
    train_X = preprocessed_data[0:train_size]
    train_Y = data[0:train_size]
    test_X = preprocessed_data[train_size:len(preprocessed_data)]
    test_Y = data[train_size:len(preprocessed_data)]
    return train_X, train_Y, test_X, test_Y
b5e120eebd6060656d71f8f76755afd0d8eccce5
3,647,706
def svn_client_conflict_tree_get_victim_node_kind(conflict):
    """svn_client_conflict_tree_get_victim_node_kind(svn_client_conflict_t * conflict) -> svn_node_kind_t"""
    return _client.svn_client_conflict_tree_get_victim_node_kind(conflict)
6258c011eb947ddedb1e060cd036ddcf9cbc1758
3,647,707
import importlib.util


def load_module(script_path: str, module_name: str):
    """
    Return a module spec and the (not yet executed) module. Typical use:

        spec, foo = load_module(path, "foo")
        spec.loader.exec_module(foo)
        foo.A()
    """
    spec = importlib.util.spec_from_file_location(module_name, script_path)
    module = importlib.util.module_from_spec(spec)
    return spec, module
61ebc105d0c7a168b37210452445e8e24e16f87a
3,647,708
def memory_func(func):
    """Decorator that measures the RAM consumed by a function call."""
    def wrapper(*args, **kwargs):
        # Get the current process id and wrap it in a Process object.
        proc = Process(getpid())
        # Record the resident set size before the call.
        start_memory = proc.memory_info().rss
        # Run the wrapped function with its arguments.
        result = func(*args, **kwargs)
        # Measure the resident set size again after the call.
        end_memory = proc.memory_info().rss
        # Report the difference.
        print(f"Physical memory used by function {func.__name__}: "
              f"{end_memory - start_memory} bytes")
        return result
    return wrapper
a8c634b3415925b65fe35df584328705eb0d171e
3,647,710
def read_data(datafile='sampling_data_2015.txt'):
    """Imports data from an ordered txt file and creates a list of samples."""
    sample_list = []
    with open(datafile, 'r') as file:
        for line in file:
            method, date, block, site, orders = line.split('|')
            new_sample = sample(method, date, block, site)
            new_sample.import_orders(orders)
            sample_list.append(new_sample)
    return sample_list
4aee4b2ef0cd9d31eefbd9f394714f0ea789b49d
3,647,711
async def get_https(method: str = "all"):
    """Get https proxies from the get_proxies_func() function."""
    return await get_proxies_func("https", method)
575eccf2149724c29062d866fcc420fe3b34be78
3,647,712
def apply_mask(input, mask):
    """Filter out an area of an image using a binary mask.

    Args:
        input: A three channel numpy.ndarray.
        mask: A black and white numpy.ndarray.

    Returns:
        A three channel numpy.ndarray.
    """
    return cv2.bitwise_and(input, input, mask=mask)
34451c71b9f18a64f5b27e3ff9269a9c4e3b803d
3,647,716
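A short usage sketch for apply_mask (not from the original source); the mask must be single-channel uint8, with non-zero pixels marking the region to keep:

import cv2
import numpy as np

img = np.full((100, 100, 3), 255, dtype=np.uint8)   # white image
mask = np.zeros((100, 100), dtype=np.uint8)
mask[25:75, 25:75] = 255                            # keep the center square
out = apply_mask(img, mask)                         # black outside, white inside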
import requests
import json


def fetch_track_lyrics(artist, title):
    """
    Returns lyrics when found, None when not found
    """
    MUSIXMATCH_KEY = get_musixmatch_key()

    api_query = 'https://api.musixmatch.com/ws/1.1/matcher.lyrics.get?'
    api_query += 'q_track=%s&' % title
    api_query += 'q_artist=%s&' % artist
    api_query += 'apikey=%s' % MUSIXMATCH_KEY

    response = requests.get(api_query)
    if response.status_code != 200:
        raise Exception("Mixmatcher API not accessible")

    res_body = json.loads(response.text)
    message = res_body['message']
    if message['header']['status_code'] != 200:
        return None

    body = message['body']
    if 'lyrics' not in body:
        return None

    lyrics = body['lyrics']
    return {
        'lyrics': lyrics['lyrics_body'],
        'lang': lyrics['lyrics_language']
    }
f8e049578bb8c6b52636fd1e1789a81af30b28e6
3,647,718
from copy import deepcopy


def string_avg(strings, binary=True):
    """
    Takes a list of strings of equal length and returns a string containing
    the most common value from each index in the string.

    Optional argument: binary - a boolean indicating whether or not to treat
    strings as binary numbers (fill in leading zeros if lengths differ).
    """
    if binary:
        # Assume this is a binary number and fill leading zeros
        strings = deepcopy(strings)
        longest = len(max(strings, key=len))
        for i in range(len(strings)):
            while len(strings[i]) < longest:
                split_string = strings[i].split("b")
                strings[i] = "0b0" + split_string[1]

    avg = ""
    for i in range(len(strings[0])):
        opts = []
        for s in strings:
            opts.append(s[i])
        avg += max(set(opts), key=opts.count)
    return avg
3d515cbeedc93b95c5f38de62000629002e41166
3,647,719
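A usage sketch for string_avg with three "0b"-prefixed strings of equal length; each output position takes the majority character:

print(string_avg(["0b101", "0b100", "0b001"]))  # "0b101"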
def refresh():
    """Pull fresh data from Open AQ and replace existing data."""
    DB.drop_all()
    DB.create_all()
    api = openaq.OpenAQ()
    status, body = api.measurements(city='Los Angeles', parameter='pm25')
    for reading in body['results']:
        # The original bound each row to `object`, shadowing the builtin.
        record = Record(datetime=reading['date']['utc'],
                        value=reading['value'])
        DB.session.add(record)
    DB.session.commit()
    return 'Data refreshed!'
9fcf71ffe0e5a46119e98f17c12aae29721285c8
3,647,720
import json


def update_organization(current_user):
    """
    Update the organization's information.
    """
    try:
        if CmsUsers.can(current_user.id, "put", "contacts"):
            organization = CmsOrganization.query.first()
            update_data = request.get_json()
            for key in list(update_data.keys()):
                if key not in ['company_name', 'full_company_name', 'requisites']:
                    del update_data[key]
            if not organization_update_validator.is_valid(update_data):
                errors = []
                for error in sorted(
                        organization_update_validator.iter_errors(update_data),
                        key=str):
                    errors.append(error.message)
                separator = '; '
                error_text = separator.join(errors)
                response = Response(
                    response=json.dumps({'type': 'danger',
                                         'text': error_text}),
                    status=422,
                    mimetype='application/json'
                )
            else:
                organization_name_old = organization.company_name
                organization.company_name = update_data['company_name']
                organization.full_company_name = update_data['full_company_name']
                if 'requisites' in update_data:
                    organization.requisites = update_data['requisites']
                db.session.add(organization)
                db.session.commit()
                response = Response(
                    response=json.dumps(
                        {'type': 'success',
                         'text': 'Отредактирована основная '
                                 'информация организации ' +
                                 str(organization_name_old) + '!',
                         'link': url_for('.get_organization', _external=True)}),
                    status=200,
                    mimetype='application/json'
                )
        else:
            response = Response(
                response=json.dumps({'type': 'danger',
                                     'text': 'Доступ запрещен (403)'}),
                status=403,
                mimetype='application/json'
            )
    except Exception:
        response = server_error(request.args.get("dbg"))
    return response
ff7826b1b4537eb0b793b426a6b6aa097936bfa9
3,647,721
def config_section_data():
    """Produce the default configuration section for app.config,
    when called by `resilient-circuits config [-c|-u]`
    """
    config_data = u"""[fn_sep]
sep_base_path=/sepm/api/v1
sep_auth_path=/sepm/api/v1/identity/authenticate
sep_host=<SEPM server dns name or ip address>
sep_port=8446
sep_username=<username>
sep_password=<password>
sep_domain=<SEP domain name>
# Optional settings for access to SEPM via a proxy.
#http_proxy=http://proxy:80
#https_proxy=http://proxy:80
# Limit result sent to Resilient, add full result as an attachment.
sep_results_limit=200
# Period of time (seconds) to wait for all endpoints to return a scan result.
sep_scan_timeout=1800
"""
    return config_data
fcad9aa412d66b4a48dfb753d64e0f84979df617
3,647,722
def read_eieio_command_message(data, offset):
    """ Reads the content of an EIEIO command message and returns an object\
        identifying the command which was contained in the packet, including\
        any parameter, if required by the command

    :param data: data received from the network
    :type data: bytestring
    :param offset: offset at which the parsing operation should start
    :type offset: int
    :return: an object which inherits from EIEIOCommandMessage which contains\
        parsed data received from the network
    :rtype:\
        :py:class:`spinnman.messages.eieio.command_messages.eieio_command_message.EIEIOCommandMessage`
    """
    command_header = EIEIOCommandHeader.from_bytestring(data, offset)
    command_number = command_header.command

    if (command_number ==
            constants.EIEIO_COMMAND_IDS.DATABASE_CONFIRMATION.value):
        return DatabaseConfirmation.from_bytestring(
            command_header, data, offset + 2)
    # Fill in buffer area with padding
    elif (command_number ==
            constants.EIEIO_COMMAND_IDS.EVENT_PADDING.value):
        return PaddingRequest()
    # End of all buffers, stop execution
    elif (command_number ==
            constants.EIEIO_COMMAND_IDS.EVENT_STOP.value):
        return EventStopRequest()
    # Stop complaining that there is sdram free space for buffers
    elif (command_number ==
            constants.EIEIO_COMMAND_IDS.STOP_SENDING_REQUESTS.value):
        return StopRequests()
    # Start complaining that there is sdram free space for buffers
    elif (command_number ==
            constants.EIEIO_COMMAND_IDS.START_SENDING_REQUESTS.value):
        return StartRequests()
    # Spinnaker requesting new buffers for spike source population
    elif (command_number ==
            constants.EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_BUFFERS.value):
        return SpinnakerRequestBuffers.from_bytestring(
            command_header, data, offset + 2)
    # Buffers being sent from host to SpiNNaker
    elif (command_number ==
            constants.EIEIO_COMMAND_IDS.HOST_SEND_SEQUENCED_DATA.value):
        return HostSendSequencedData.from_bytestring(
            command_header, data, offset + 2)
    # Buffers available to be read from a buffered out vertex
    elif (command_number ==
            constants.EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_READ_DATA.value):
        return SpinnakerRequestReadData.from_bytestring(
            command_header, data, offset + 2)
    # Host confirming data being read from SpiNNaker memory
    elif (command_number ==
            constants.EIEIO_COMMAND_IDS.HOST_DATA_READ.value):
        return HostDataRead.from_bytestring(
            command_header, data, offset + 2)
    return EIEIOCommandMessage(command_header, data, offset + 2)
05abce201acf3b706e4b476c15eb2af5a0102cc8
3,647,723
def quote(): """Get stock quote.""" if request.method == "POST": # Get values get_symbol = request.form.get("symbol") stock = lookup(get_symbol) # Ensure symbol was submitted if not get_symbol: return apology("must provide symbol") # Ensure symbol exists elif not stock: return apology("stock not found") # Display stocks else: return render_template("quoted.html", stock=stock) else: return render_template("quote.html", stock=None)
b26c04c01e5ddb26c19e555a45e3fac6f58c0fef
3,647,724
def counts_to_df(value_counts, colnames, n_points): """DO NOT USE IT! """ pdf = pd.DataFrame(value_counts .to_frame('count') .reset_index() .apply(lambda row: dict({'count': row['count']}, **dict(zip(colnames, row['index'].toArray()))), axis=1) .values .tolist()) pdf['count'] /= pdf['count'].sum() proportions = pdf['count'] / pdf['count'].min() factor = int(n_points / proportions.sum()) pdf = pd.concat([pdf[colnames], (proportions * factor).astype(int)], axis=1) combinations = pdf.apply(lambda row: row.to_dict(), axis=1).values.tolist() return pd.DataFrame([dict(v) for c in combinations for v in int(c.pop('count')) * [list(c.items())]])
85d5283f2d53dcf3ec33d7a1f3f52d9acc0affde
3,647,725
import math


def make_pair_plot(samples, param_names=None, pair_plot_params=PairPlotParams()):
    """
    Make a pair plot for the parameters from the posterior distribution.

    Parameters
    ----------
    samples : Pandas DataFrame
        Each column contains samples from posterior distribution.
    param_names : list of str
        Names of the parameters for plotting. If None, all will be plotted.

    Returns
    -------
    Seaborn's PairGrid
    """
    param_names = filter_param_names(samples.columns, param_names)

    if len(param_names) > pair_plot_params.max_params:
        print((
            f'Showing only first {pair_plot_params.max_params} '
            f'parameters out of {len(param_names)} in pair plot. '
            'Consider limiting the parameters with "param_names".'))

        param_names = param_names[:pair_plot_params.max_params]

    samples = samples[param_names]

    # Show no more than `max_samples` markers
    keep_nth = math.ceil(samples.shape[0] / pair_plot_params.max_samples)
    samples = samples[::keep_nth]

    g = sns.PairGrid(samples)

    g = g.map_upper(sns.scatterplot, s=pair_plot_params.marker_size,
                    color=pair_plot_params.color,
                    edgecolor=pair_plot_params.edgecolor,
                    alpha=pair_plot_params.alpha)

    g = g.map_lower(sns.kdeplot, color=pair_plot_params.color)

    g = g.map_diag(plt.hist, color=pair_plot_params.color,
                   edgecolor=pair_plot_params.diag_edge_color)

    return g
73f5d8fc7dee8b3179cb8c1513eb2989c788e7cf
3,647,726
def read_meta_soe(metafile): """read soe metadata.csv to get filename to meta mapping""" wavfiles = csv2dict(metafile) return {f['fid']:{k:v for (k,v) in f.items() if k!='fid'} for f in wavfiles}
51f82a45d12b332d9edbe7b027dc7ee2582af35b
3,647,727
def send_message(message, string, dm=False, user=None, format_content=True): """send_message Sends a message with string supplied by [lang]_STRING.txt files. :param message: MessageWrapper object with data for formatting. :param string: Name of the string to read. :param dm: Whether the message should be sent to dm. Requires user to not be None :param user: User for dm usage. """ msg = get_string(string, users.get_language(message)) if not msg or msg == MessageCode.UNKNOWN_STRING: return MessageCode.NO_STRING return send_custom_message(message, msg, dm=dm, user=user, format_content=format_content)
c8396108126fcaea735a94be3dcd4ed954f43d70
3,647,728
def calc_torque(beam, fforb, index=False): """ Calculates torque from a neutral beam (or beam component) torque = F * r_tan = (P/v) * r_tan = (P/sqrt(2E/m)) * r_tan = P * sqrt(m/(2E)) * r_tan :param fforb: :param index: :param beam: beam object with attributes z, m, a, en, pwr, rtan :return: torque """ if index is not False: power = beam.P.W[index] energy = beam.E.J[index] mass = beam.m rtan = beam.rtang[index] torque = power * np.sqrt(0.5 * mass / energy) * rtan * (1.0 - fforb) # Piper Changes: Included fast ion losses. return torque else: power = beam.P.W energy = beam.E.J mass = beam.m rtan = beam.rtang torque = power * np.sqrt(0.5 * mass / energy) * rtan * (1.0-fforb) # Piper Changes: Included fast ion losses. return torque
55cb8172f874a1d25c6dcf36c693f818d11d59c4
3,647,729
def cli(ctx, user_id): """Create a new API key for a given user. Output: the API key for the user """ return ctx.gi.users.create_user_apikey(user_id)
d7dafd77ef983286184b6f5aa2362bb734389696
3,647,730
import re def whitespace_tokenizer(text): """Tokenize on whitespace, keeping whitespace. Args: text: The text to tokenize. Returns: list: A list of pseudo-word tokens. """ return re.findall(r"\S+\s*", text)
e79234b15912fdc225e2571788844732296f93d7
3,647,731
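A quick illustration of the tokenizer above; the sample string is made up for demonstration, and each token keeps the whitespace that follows it.

tokens = whitespace_tokenizer("Hello   world\nfoo ")
# -> ['Hello   ', 'world\n', 'foo ']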
def _u2i(number): """ Converts a 32 bit unsigned number to signed. If the number is negative it indicates an error. On error a pigpio exception will be raised if exceptions is True. """ v = u2i(number) if v < 0: if exceptions: raise error(error_text(v)) return v
920a2dcbf68df34141c482c2318917ccff248501
3,647,732
def apply_once(func, arr, axes, keepdims=True):
    """
    Similar to `numpy.apply_over_axes`, except this performs the operation
    over a flattened version of all the axes, meaning that the function will
    only be called once. This only makes a difference for non-linear
    functions.

    Parameters
    ----------
    func : callback
        Function that operates well on Numpy arrays and returns a single value
        of compatible dtype.
    arr : ndarray
        Array to do operation over.
    axes : int or iterable
        Specifies the axes to perform the operation. Only one call will be
        made to `func`, with all values flattened.
    keepdims : bool
        By default, this is True, so the collapsed dimensions remain with
        length 1. This is similar to `numpy.apply_over_axes` in that regard.
        If this is set to False, the dimensions are removed, just like when
        using for instance `numpy.sum` over a single axis. Note that this is
        safer than subsequently calling squeeze, since this option will
        preserve length-1 dimensions that were not operated on.

    Examples
    --------
    >>> import deepdish as dd
    >>> import numpy as np
    >>> rs = np.random.RandomState(0)
    >>> x = rs.uniform(size=(10, 3, 3))

    Imagine that you have ten 3x3 images and you want to calculate each
    image's intensity standard deviation:

    >>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
    array([ 0.06056838,  0.08230712,  0.08135083,  0.09938963,  0.08533604,
            0.07830725,  0.066148  ,  0.07983019,  0.08134123,  0.01839635])

    This is the same as ``x.std(1).std(1)``, which is not the standard
    deviation of all 9 pixels together. To fix this we can flatten the pixels
    and try again:

    >>> x.reshape(10, 9).std(axis=1)
    array([ 0.17648981,  0.32849108,  0.29409526,  0.25547501,  0.23649064,
            0.26928468,  0.20081239,  0.33052397,  0.29950855,  0.26535717])

    This is exactly what this function does for you:

    >>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
    array([ 0.17648981,  0.32849108,  0.29409526,  0.25547501,  0.23649064,
            0.26928468,  0.20081239,  0.33052397,  0.29950855,  0.26535717])
    """
    all_axes = np.arange(arr.ndim)
    if isinstance(axes, int):
        axes = {axes}
    else:
        axes = set(axis % arr.ndim for axis in axes)

    principal_axis = min(axes)
    for i, axis in enumerate(axes):
        axis0 = principal_axis + i
        if axis != axis0:
            all_axes[axis0], all_axes[axis] = all_axes[axis], all_axes[axis0]

    transposed_arr = arr.transpose(all_axes)

    new_shape = []
    new_shape_keepdims = []
    for axis, dim in enumerate(arr.shape):
        if axis == principal_axis:
            new_shape.append(-1)
        elif axis not in axes:
            new_shape.append(dim)

        if axis in axes:
            new_shape_keepdims.append(1)
        else:
            new_shape_keepdims.append(dim)

    collapsed = np.apply_along_axis(func,
                                    principal_axis,
                                    transposed_arr.reshape(new_shape))

    if keepdims:
        return collapsed.reshape(new_shape_keepdims)
    else:
        return collapsed
939eea81d4443a4ef144105b1cc9335000b20f49
3,647,733
from io import BytesIO def bytes_to_bytesio(bytestream): """Convert a bytestring to a BytesIO ready to be decoded.""" fp = BytesIO() fp.write(bytestream) fp.seek(0) return fp
d59e4f5ccc581898da20bf5d3f6e70f8e8712aa6
3,647,734
from typing import List def image_scatter_channels(im: Image, subimages=None) -> List[Image]: """Scatter an image into a list of subimages using the channels :param im: Image :param subimages: Number of channels :return: list of subimages """ image_list = list() if subimages is None: subimages = im.shape[0] for slab in image_channel_iter(im, subimages=subimages): image_list.append(slab) assert len(image_list) == subimages, "Too many subimages scattered" return image_list
f87cb88ef060a6d093dacabdaab0ebc94861b734
3,647,735
def unauthorized_handler():
    """
    If an unauthorized request arrives, redirect to the sign-in URL.
    :return: Redirect to the sign-in page
    """
    current_app.logger.info("Unauthorized user needs to sign in")
    return redirect(url_for('userView.signin'))
72b505ae13023aea23e0353b6571da64d9f647b8
3,647,736
import posixpath def pre_order_next(path, children): """Returns the next dir for pre-order traversal.""" assert path.startswith('/'), path # First subdir is next for subdir in children(path): return posixpath.join(path, subdir) while path != '/': # Next sibling is next name = posixpath.basename(path) parent = posixpath.dirname(path) siblings = list(children(parent)) assert name in siblings if name != siblings[-1]: return posixpath.join(parent, siblings[siblings.index(name) + 1]) # Go up, find a sibling of the parent. path = parent # This was the last one return None
fcbe2b17b29396ac978f4a931a454c988e6fe05b
3,647,737
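A minimal sketch of driving pre_order_next over a toy directory tree; the _tree mapping and _children callback are illustrative assumptions, not part of the original code.

# Hypothetical directory tree mapping each path to its ordered child names.
_tree = {'/': ['a', 'b'], '/a': ['x'], '/a/x': [], '/b': []}

def _children(path):
    return _tree.get(path, [])

# Pre-order walk starting at the root prints: /, /a, /a/x, /b
path = '/'
while path is not None:
    print(path)
    path = pre_order_next(path, _children)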
def gettiming(process_list, typetiming):
    """
    Return a sorted set of the distinct durations needed to convert to Morse code.
    """
    timing = []
    for x in process_list:
        if x[0] == typetiming:
            timing.append(x[3])
    timing = set(timing)
    return sorted(timing)
8e71449eacaee086f9f9147e1c3b8602ce8e553f
3,647,738
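The indexing in gettiming implies each record carries its type at position 0 and its duration at position 3; the tuples below are made-up sample data only meant to show the distinct, sorted durations coming back.

# Hypothetical (type, start, end, duration) records.
_process_list = [
    ('tone', 0.00, 0.12, 0.12),
    ('silence', 0.12, 0.48, 0.36),
    ('tone', 0.48, 0.60, 0.12),
    ('tone', 0.60, 0.94, 0.34),
]
print(gettiming(_process_list, 'tone'))  # [0.12, 0.34]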
import click def init(): """Top level command handler.""" @click.command() @click.option('--approot', type=click.Path(exists=True), envvar='TREADMILL_APPROOT', required=True) @click.argument('eventfile', type=click.Path(exists=True)) def configure(approot, eventfile): """Configure local manifest and schedule app to run.""" tm_env = appenv.AppEnvironment(root=approot) container_dir = app_cfg.configure(tm_env, eventfile) _LOGGER.info('Configured %r', container_dir) return configure
2c24fe8dc2225b7f2f848ac3d2ef09275829c754
3,647,739
def tobooks(f: '(toks, int) -> DataFrame', bks=bktksall) -> DataFrame: """Apply a function `f` to all the tokens in each book, putting the results into a DataFrame column, and adding a column to indicate each book. """ return pd.concat([f(v, i) for i, v in bks.items()])
e081e1c01d68b7b84cb6395f32f217360657636f
3,647,741
def _identity_error_message(msg_type, message, status_code, request): """ Set the response code on the request, and return a JSON blob representing a Identity error body, in the format Identity returns error messages. :param str msg_type: What type of error this is - something like "badRequest" or "itemNotFound" for Identity. :param str message: The message to include in the body. :param int status_code: The status code to set :param request: the request to set the status code on :return: dictionary representing the error body """ request.setResponseCode(status_code) return { msg_type: { "message": message, "code": status_code } }
d73e182fc794f01c3415069ffeb37e76a01df7af
3,647,742
def _string_to_list(s, dtype='str'):
    """
    converts string to list

    Args:
        s: input
        dtype: specifies the type of elements in the list
            can be one of `str` or `int`
    """
    if ' <SENT/> ' in s:
        return s.split(' <SENT/> ')
    elif dtype == 'int':
        return [int(e) for e in s.split(LIST_SEPARATOR) if e]
    if dtype == 'str':
        return s.split(LIST_SEPARATOR)
9d2950afcd9f47e1fef7856af117953dbf99410a
3,647,743
import warnings


def internal_solve_pounders(
    criterion,
    x0,
    lower_bounds,
    upper_bounds,
    gtol_abs,
    gtol_rel,
    gtol_scaled,
    maxinterp,
    maxiter,
    delta,
    delta_min,
    delta_max,
    gamma0,
    gamma1,
    theta1,
    theta2,
    eta0,
    eta1,
    c1,
    c2,
    solver_sub,
    maxiter_sub,
    maxiter_gradient_descent_sub,
    gtol_abs_sub,
    gtol_rel_sub,
    gtol_scaled_sub,
    gtol_abs_conjugate_gradient_sub,
    gtol_rel_conjugate_gradient_sub,
    k_easy_sub,
    k_hard_sub,
    batch_evaluator,
    n_cores,
):
    """Find the local minimum to a non-linear least-squares problem using POUNDERS.

    Args:
        criterion (callable): Function that takes a parameter vector and returns
            the vector of residuals.
        x0 (np.ndarray): Initial guess for the parameter vector (starting points).
        lower_bounds (np.ndarray): Lower bounds.
            Must have same length as the initial guess of the parameter vector.
            Equal to -1 if not provided by the user.
        upper_bounds (np.ndarray): Upper bounds.
            Must have same length as the initial guess of the parameter vector.
            Equal to 1 if not provided by the user.
        gtol_abs (float): Convergence tolerance for the absolute gradient norm.
            Stop if norm of the gradient is less than this.
        gtol_rel (float): Convergence tolerance for the relative gradient norm.
            Stop if norm of the gradient relative to the criterion value is less
            than this.
        gtol_scaled (float): Convergence tolerance for the scaled gradient norm.
            Stop if norm of the gradient divided by norm of the gradient at the
            initial parameters is less than this.
        maxinterp (int): Maximum number of interpolation points. Default is
            `2 * n + 1`, where `n` is the length of the parameter vector.
        maxiter (int): Maximum number of iterations. If reached, terminate.
        delta (float): Delta, initial trust-region radius.
        delta_min (float): Minimal trust-region radius.
        delta_max (float): Maximal trust-region radius.
        gamma0 (float): Shrinking factor of the trust-region radius in case the
            solution vector of the subproblem is not accepted, but the model is
            fully linear (i.e. "valid").
        gamma1 (float): Expansion factor of the trust-region radius in case the
            solution vector of the subproblem is accepted.
        theta1 (float): Threshold for adding the current candidate vector
            to the model. Function argument to find_affine_points().
        theta2 (float): Threshold for adding the current candidate vector
            to the model. Argument to get_interpolation_matrices_residual_model().
        eta0 (float): Threshold for accepting the solution vector of the
            trust-region subproblem as the best candidate.
        eta1 (float): Threshold for successfully accepting the solution vector
            of the trust-region subproblem as the best candidate.
        c1 (float): Threshold for accepting the norm of our current x candidate.
            Equal to sqrt(n) by default. Argument to find_affine_points() in case
            the input array *model_improving_points* is zero.
        c2 (int): Threshold for accepting the norm of our current candidate
            vector. Equal to 10 by default. Argument to find_affine_points() in
            case the input array *model_improving_points* is not zero.
        solver_sub (str): Solver to use for the trust-region subproblem.
            Two internal solvers are supported:
            - "bntr": Bounded Newton Trust-Region (default, supports bound
              constraints)
            - "gqtpar": (does not support bound constraints)
        maxiter_sub (int): Maximum number of iterations in the trust-region
            subproblem.
        maxiter_gradient_descent_sub (int): Maximum number of gradient descent
            iterations to perform when the trust-region subsolver BNTR is used.
        gtol_abs_sub (float): Convergence tolerance for the absolute gradient
            norm in the trust-region subproblem ("BNTR").
        gtol_rel_sub (float): Convergence tolerance for the relative gradient
            norm in the trust-region subproblem ("BNTR").
        gtol_scaled_sub (float): Convergence tolerance for the scaled gradient
            norm in the trust-region subproblem ("BNTR").
        gtol_abs_conjugate_gradient_sub (float): Convergence tolerance for the
            absolute gradient norm in the conjugate gradient step of the
            trust-region subproblem ("BNTR").
        gtol_rel_conjugate_gradient_sub (float): Convergence tolerance for the
            relative gradient norm in the conjugate gradient step of the
            trust-region subproblem ("BNTR").
        k_easy_sub (float): Stopping criterion for the "easy" case in the
            trust-region subproblem ("GQTPAR").
        k_hard_sub (float): Stopping criterion for the "hard" case in the
            trust-region subproblem ("GQTPAR").
        batch_evaluator (str or callable): Name of a pre-implemented batch
            evaluator (currently 'joblib' and 'pathos_mp') or callable with the
            same interface as the estimagic batch_evaluators.
        n_cores (int): Number of processes used to parallelize the function
            evaluations. Default is 1.

    Returns:
        (dict) Result dictionary containing:
        - solution_x (np.ndarray): Solution vector of shape (n,).
        - solution_criterion (np.ndarray): Values of the criterion function at
          the solution vector. Shape (n_obs,).
        - history_x (np.ndarray): Entire history of x.
          Shape (history.get_n_fun(), n).
        - history_criterion (np.ndarray): Entire history of the criterion
          function evaluations. Shape (history.get_n_fun(), n_obs)
        - n_iterations (int): Number of iterations the algorithm ran before
          finding a solution vector or reaching maxiter.
        - "success" (bool): Boolean indicating whether a solution has been found
          before reaching maxiter.
    """
    history = LeastSquaresHistory()

    n = len(x0)
    model_indices = np.zeros(maxinterp, dtype=int)
    n_last_modelpoints = 0

    if lower_bounds is not None and upper_bounds is not None:
        if np.max(x0 + delta - upper_bounds) > 1e-10:
            raise ValueError("Starting points + delta > upper bounds.")

    xs = [x0]
    for i in range(n):
        x1 = x0.copy()
        x1[i] += delta
        xs.append(x1)

    residuals = batch_evaluator(criterion, arguments=xs, n_cores=n_cores)

    history.add_entries(xs, residuals)
    accepted_index = history.get_best_index()

    residual_model = create_initial_residual_model(
        history=history, accepted_index=accepted_index, delta=delta
    )
    main_model = create_main_from_residual_model(
        residual_model=residual_model, multiply_square_terms_with_residuals=False
    )

    x_accepted = history.get_best_x()
    gradient_norm_initial = np.linalg.norm(main_model.linear_terms)
    gradient_norm_initial *= delta

    valid = True
    n_modelpoints = n + 1
    last_model_indices = np.zeros(maxinterp, dtype=int)

    converged = False
    convergence_reason = "Continue iterating."
for niter in range(maxiter + 1): result_sub = solve_subproblem( x_accepted=x_accepted, main_model=main_model, lower_bounds=lower_bounds, upper_bounds=upper_bounds, delta=delta, solver=solver_sub, maxiter=maxiter_sub, maxiter_gradient_descent=maxiter_gradient_descent_sub, gtol_abs=gtol_abs_sub, gtol_rel=gtol_rel_sub, gtol_scaled=gtol_scaled_sub, gtol_abs_conjugate_gradient=gtol_abs_conjugate_gradient_sub, gtol_rel_conjugate_gradient=gtol_rel_conjugate_gradient_sub, k_easy=k_easy_sub, k_hard=k_hard_sub, ) x_candidate = x_accepted + result_sub["x"] * delta residuals_candidate = criterion(x_candidate) history.add_entries(x_candidate, residuals_candidate) predicted_reduction = history.get_critvals( accepted_index ) - history.get_critvals(-1) actual_reduction = -result_sub["criterion"] with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) rho = np.divide(predicted_reduction, actual_reduction) if (rho >= eta1) or (rho > eta0 and valid is True): residual_model = residual_model._replace( intercepts=history.get_residuals(index=accepted_index) ) center_info = {"x": history.get_best_x(), "radius": delta} x_candidate = history.get_centered_xs(center_info, index=-1) residual_model = update_residual_model_with_new_accepted_x( residual_model=residual_model, x_candidate=x_candidate ) main_model = update_main_model_with_new_accepted_x( main_model=main_model, x_candidate=x_candidate ) x_accepted = history.get_best_x() accepted_index = history.get_best_index() critval_accepted = history.get_critvals(index=accepted_index) # The model is deemend "not valid" if it has less than n model points. # Otherwise, if the model has n points, it is considered "valid" or # "fully linear". # Note: valid is True in the first iteration if not valid: ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=np.zeros((n, n)), project_x_onto_null=False, delta=delta, theta1=theta1, c=c1, model_indices=model_indices, n_modelpoints=0, ) if n_modelpoints < n: ( history, model_indices, ) = add_geomtery_points_to_make_main_model_fully_linear( history=history, main_model=main_model, model_improving_points=model_improving_points, model_indices=model_indices, x_accepted=x_accepted, n_modelpoints=n_modelpoints, delta=delta, criterion=criterion, lower_bounds=lower_bounds, upper_bounds=upper_bounds, batch_evaluator=batch_evaluator, n_cores=n_cores, ) n_modelpoints = n delta_old = delta delta = update_trustregion_radius( result_subproblem=result_sub, rho=rho, model_is_valid=valid, delta=delta, delta_min=delta_min, delta_max=delta_max, eta1=eta1, gamma0=gamma0, gamma1=gamma1, ) ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=np.zeros((n, n)), project_x_onto_null=False, delta=delta, theta1=theta1, c=c1, model_indices=model_indices, n_modelpoints=0, ) if n_modelpoints == n: valid = True else: valid = False ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=model_improving_points, project_x_onto_null=project_x_onto_null, delta=delta, theta1=theta1, c=c2, model_indices=model_indices, n_modelpoints=n_modelpoints, ) if n_modelpoints < n: ( history, model_indices, ) = add_geomtery_points_to_make_main_model_fully_linear( history=history, main_model=main_model, 
model_improving_points=model_improving_points, model_indices=model_indices, x_accepted=x_accepted, n_modelpoints=n_modelpoints, delta=delta, criterion=criterion, lower_bounds=lower_bounds, upper_bounds=upper_bounds, batch_evaluator=batch_evaluator, n_cores=n_cores, ) model_indices, n_model_points = update_model_indices_residual_model( model_indices, accepted_index, n_modelpoints ) ( x_sample_monomial_basis, monomial_basis, basis_null_space, lower_triangular, n_modelpoints, ) = get_interpolation_matrices_residual_model( history=history, x_accepted=x_accepted, model_indices=model_indices, delta=delta, c2=c2, theta2=theta2, n_maxinterp=maxinterp, n_modelpoints=n_modelpoints, ) center_info = {"x": x_accepted, "radius": delta_old} interpolation_set = history.get_centered_xs( center_info, index=model_indices[:n_modelpoints] ) residual_model_interpolated = interpolate_residual_model( history=history, interpolation_set=interpolation_set, residual_model=residual_model, model_indices=model_indices, n_modelpoints=n_modelpoints, n_maxinterp=maxinterp, ) coefficients_residual_model = get_coefficients_residual_model( x_sample_monomial_basis=x_sample_monomial_basis, monomial_basis=monomial_basis, basis_null_space=basis_null_space, lower_triangular=lower_triangular, residual_model_interpolated=residual_model_interpolated, n_modelpoints=n_modelpoints, ) residual_model = residual_model._replace( intercepts=history.get_residuals(index=accepted_index) ) residual_model = update_residual_model( residual_model=residual_model, coefficients_to_add=coefficients_residual_model, delta=delta, delta_old=delta_old, ) main_model = create_main_from_residual_model(residual_model) gradient_norm = np.linalg.norm(main_model.linear_terms) gradient_norm *= delta ( last_model_indices, n_last_modelpoints, same_model_used, ) = get_last_model_indices_and_check_for_repeated_model( model_indices=model_indices, last_model_indices=last_model_indices, n_modelpoints=n_modelpoints, n_last_modelpoints=n_last_modelpoints, ) converged, convergence_reason = _check_for_convergence( gradient_norm=gradient_norm, gradient_norm_initial=gradient_norm_initial, critval=critval_accepted, delta=delta, delta_old=delta_old, same_model_used=same_model_used, converged=converged, reason=convergence_reason, niter=niter, gtol_abs=gtol_abs, gtol_rel=gtol_rel, gtol_scaled=gtol_scaled, maxiter=maxiter, ) if converged: break result_dict = { "solution_x": history.get_xs(index=accepted_index), "solution_criterion": history.get_best_residuals(), "history_x": history.get_xs(), "history_criterion": history.get_residuals(), "n_iterations": niter, "success": converged, "message": convergence_reason, } return result_dict
c3f602af6f78a1cb57c15a6488e4aeadcd081951
3,647,744
import torch def get_overlap_info(bbox): """ input: box_priors: [batch_size, number_obj, 4] output: [number_object, 6] number of overlapped obj (self not included) sum of all intersection area (self not included) sum of IoU (Intersection over Union) average of all intersection area (self not included) average of IoU (Intersection over Union) roi area """ batch_size, num_obj, bsize = bbox.shape # generate input feat overlap_info = Variable(torch.FloatTensor(batch_size, num_obj, 6).zero_().cuda()) # each obj has how many overlaped objects reverse_eye = Variable(1.0 - torch.eye(num_obj).float().cuda()) # removed diagonal elements for i in range(batch_size): sliced_bbox = bbox[i].view(num_obj, bsize) sliced_intersection = bbox_intersections(sliced_bbox, sliced_bbox) sliced_overlap = bbox_overlaps(sliced_bbox, sliced_bbox, sliced_intersection) sliced_area = bbox_area(sliced_bbox) # removed diagonal elements sliced_intersection = sliced_intersection * reverse_eye sliced_overlap = sliced_overlap * reverse_eye # assign value overlap_info[i, :, 0] = (sliced_intersection > 0.0).float().sum(1) overlap_info[i, :, 1] = sliced_intersection.sum(1) overlap_info[i, :, 2] = sliced_overlap.sum(1) overlap_info[i, :, 3] = overlap_info[i, :, 1] / (overlap_info[i, :, 0] + 1e-9) overlap_info[i, :, 4] = overlap_info[i, :, 2] / (overlap_info[i, :, 0] + 1e-9) overlap_info[i, :, 5] = sliced_area return overlap_info.view(batch_size * num_obj, 6)
451507a49fca589bc1102b085eab551ebe32bcc7
3,647,745
def get_current_language(request, set_default=True, default_id=1): """ Description: Returns the current active language. Will set a default language if none is found. Args: request (HttpRequest): HttpRequest from Django set_default (Boolean): Indicates if a default language must be activated (if none currently is). Default to True. default_id (Integer): The PK for the default Language instance. Default to 1 Returns: Language: The currently used language from our app's Language model """ # Base variables language = None language_name = request.session.get(LANGUAGE_SESSION_KEY, False) # Get the language if language_name: try: language = Language.objects.get(django_language_name=language_name) except Language.DoesNotExist: pass # Set a default language if necessary if language is None and set_default: language = set_default_language(request, default_id) # Always return the active language return language
98bc2a25201dc87afcee24d8ff5d10fcab7849bb
3,647,746
def is_leap_year(year): """ returns True for leap year and False otherwise :param int year: calendar year :return bool: """ # return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0) return year % 100 != 0 or year % 400 == 0 if year % 4 == 0 else False
5bd0bb7a44dc7004b9198cb3d8ed244dc02417c2
3,647,747
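A few spot checks for is_leap_year, covering the century edge cases:

assert is_leap_year(2024)       # divisible by 4, not a century year
assert not is_leap_year(2023)   # not divisible by 4
assert is_leap_year(2000)       # century year divisible by 400
assert not is_leap_year(1900)   # century year not divisible by 400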
def update_search_params(context, **kwargs): """Update the set parameters of the current request""" params = context["request"].GET.copy() for k, v in kwargs.items(): params[k] = v return params.urlencode()
e3ce5a5a1dadc90bb544a761e154214d7a538f30
3,647,748
def dynamic2message(dynamic_dict: dict) -> Message:
    """
    Convert a raw dynamic (post) fetched from the API into a message.
    """
    author_name = dynamic_dict['desc']['user_profile']['info']['uname']
    dynamic_id = dynamic_dict['desc']['dynamic_id']
    if dynamic_dict['desc']['type'] == 1:  # repost or vote
        text = f"用户[{author_name}]转发了动态:\n" + dynamic_dict['card']['item']['content'] + "\n---------------------\n"
        origin_dynamic = dynamic.get_info(dynamic_dict['card']['item']['orig_dy_id'])
        ori_message = dynamic2message(origin_dynamic)
        msg = MessageSegment.text(text) + ori_message + MessageSegment.text('\n---------------------')
    elif dynamic_dict['desc']['type'] == 2:  # post with images and text
        text = f"用户[{author_name}]发布了动态:\n" + dynamic_dict['card']['item']['description']
        msg = MessageSegment.text(text)
        for i in range(dynamic_dict['card']['item']['pictures_count']):
            msg = msg + MessageSegment.image(dynamic_dict['card']['item']['pictures'][i]['img_src'])
    elif dynamic_dict['desc']['type'] == 4:  # text-only post
        msg = MessageSegment.text(f"用户[{author_name}]发布了动态:\n" + dynamic_dict['card']['item']['content'])
    elif dynamic_dict['desc']['type'] == 8:  # video upload
        msg = MessageSegment.text(
            f"用户[{author_name}]发布了视频:\n" + dynamic_dict['card']['dynamic'] + "\n视频标题:" + dynamic_dict['card'][
                'title'] + "\n视频链接:" + dynamic_dict['card']['short_link'])
    elif dynamic_dict['desc']['type'] == 64:  # published article (column)
        msg = MessageSegment.text(f"用户[{author_name}]发布了专栏:\n" + dynamic_dict['card']['title'])
    else:
        msg = MessageSegment.text(f'用户[{author_name}]发布了动态,但无法判断类型')
    msg = msg + MessageSegment.text(f'\n\n原动态链接:https://t.bilibili.com/{dynamic_id}')
    return msg
b5330876cb58bf71c73ef9f4d3cbcdd0e583aba6
3,647,749
def dem_to_roughness(src_raster, band=0): """Calculate the roughness for the DEM. Parameters ---------- src_raster : Raster The dem used to calculate the roughness. band : int, optional, default: 0 source band number to use. Returns ------- dst_raster: Raster roughness calculated from the DEM. """ options = dict(band=band+1, format='MEM') ds_src = src_raster.to_gdal_ds() ds = gdal.DEMProcessing('', ds_src, 'Roughness', **options) dst_raster = tgp.read_gdal_ds(ds) return dst_raster
f8ba073560aaf18ab9101befc5a3d1727a7cb93e
3,647,750
from typing import Any
from typing import List


def batch_matmul_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if batch_matmul is supported by TensorRT."""
    if get_tensorrt_use_implicit_batch_mode() and len(args[0].checked_type.shape) != len(
        args[1].checked_type.shape
    ):
        logger.info(f"{op_name}: requires use_implicit_batch=False.")
        return False
    return True
6c83ce1116a88e864bd577ee2dd7d669d43c43a3
3,647,752
def func_run_dynamic(input_file, dynamic_dic, exclude, pprint):
    """
    Execute one dynamic template
    :param input_file: (string) The template file name
    :param dynamic_dic: (dict) The dictionary of the dynamic variables
    :param exclude: (bool) If True, exclude None values when pretty-printing
    :param pprint: (bool) If True, pretty-print the executed template
    :return: (Template) The executed template object
    """
    new_template_filename = create_dynamic_template(input_file, dynamic_dic)
    t = Template.Template()
    t.file_path = new_template_filename
    t.load_sections()
    t.set_execute_order()
    t.start_driver()
    report = t.run()
    if pprint:
        t.pprint(exclude_none=exclude)
    return t
9432eadb4e3735a06f35aedd8fd9bb175ab2ba55
3,647,753
import ctypes def xclGetDeviceInfo2 (handle, info): """ xclGetDeviceInfo2() - Obtain various bits of information from the device :param handle: (xclDeviceHandle) device handle :param info: (xclDeviceInfo pointer) Information record :return: 0 on success or appropriate error number """ libc.xclGetDeviceInfo2.restype = ctypes.c_int libc.xclGetDeviceInfo2.argtypes = [xclDeviceHandle, ctypes.POINTER(xclDeviceInfo2)] return libc.xclGetDeviceInfo2(handle, info)
794b6208c19a4f982a9fffb9270a3485299b62eb
3,647,755
def template_failure(request, status=403, **kwargs): """ Renders a SAML-specific template with general authentication error description. """ return render(request, 'djangosaml2/login_error.html', status=status)
fbcc8ad756213b4ba7f44d799c67b67beaad18f8
3,647,756
def zflatten2xyz(z, x=None, y=None):
    """ flatten an nxm 2D array to [x, y, z] of shape=(n*m, 3)"""
    if x is None:
        x = np.arange(0, z.shape[0])
    if y is None:
        y = np.arange(0, z.shape[1])
    xlen = len(x)
    ylen = len(y)
    assert z.shape[0] == xlen and z.shape[1] == ylen, 'check dimensions!!!'
    xx, yy = np.meshgrid(x, y)
    xx = xx.T
    yy = yy.T  # meshgrid takes the second dimension as x
    xylen = xlen*ylen
    return np.concatenate((xx.reshape((xylen, 1)), yy.reshape((xylen, 1)), z.reshape((xylen, 1))), axis=1)
96fcc9755660a85f5501958cf3f7d8c7a0e35b69
3,647,757
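A small shape check for zflatten2xyz (assuming numpy is imported as np, as the function body does); the 2x3 input grid is arbitrary example data.

import numpy as np

z = np.arange(6, dtype=float).reshape(2, 3)   # values on a 2 x 3 grid
xyz = zflatten2xyz(z, x=np.array([0., 1.]), y=np.array([0., 1., 2.]))
print(xyz.shape)   # (6, 3) -- columns are x, y, z
print(xyz[0])      # [0. 0. 0.]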
def penalized_log_likelihood(curve,t,pairwise_contact_matrix,a,b,term_weights,square_root_speed=None,pairwise_distance_matrix=None):
    """
    penalized log likelihood
    """
    if pairwise_distance_matrix is None:  # if we do not already have the pairwise distance matrix computed, then compute it
        pairwise_distance_matrix=compute_pairwise_distance(curve)
    L=0  # initialize log likelihood term
    R1=0  # initialize first order term
    R2=0  # initialize second order term
    Q=0  # initialize parametrization penalty term
    S=0  # initialize shape prior term
    if term_weights[0]!=0:
        L=term_weights[0]*loglikelihood_Varoquaux_with_missing(pairwise_distance_matrix,a,b,pairwise_contact_matrix)
    if (term_weights[1]!=0)&(term_weights[2]==0):
        R1=term_weights[1]*srvf.length(curve,t)
    if (term_weights[2]!=0):
        R1,R2=srvf.roughness2(curve,t)
        R1=term_weights[1]*R1
        R2=term_weights[2]*R2
    if (term_weights[3]!=0):
        Q=term_weights[3]*parametrization_error(curve,square_root_speed,t)
    if (term_weights[4]!=0):
        S=term_weights[4]*0  # not implemented yet
    return L-R1-R2-Q-S
f26b5148b5f56be958d99714e5207417fd40a15d
3,647,758
def dict2array(X): """ Returns a Numpy array from dictionary Parameters ---------- X: dict """ all_var = [] for k in X.keys(): all_var.append(X[k]) return np.array(all_var)
e3d1ecabe9897af7c60a8e4be1e92603619d130a
3,647,760
import requests


def dfs_level_details():
    """This function traverses all levels in a DFS style. It gets the child directories
    and recursively calls the same function on the child directories to extract their
    level details

    Returns:
        Dictionary: Key is the level name, value is a list with first element as url
        and the second element as the bounding box of that url
    """
    level_details = {}
    local_server_name = app.config['HOST_NAME'] if 'HOST_NAME' in app.config else "Unknown"
    try:
        bounding_box_level = GeographyHelper.GetCoordinatesForLevel(local_server_name)
    except:
        print("An error has occurred while retrieving bounding box")
        bounding_box_level = None
    level_details[local_server_name] = [request.url_root, bounding_box_level]
    locations_to_urls = DirectoryNameToURL.objects(relationship='child').all()
    if locations_to_urls == None:
        return None
    for location_to_url in locations_to_urls:
        request_url = urljoin(location_to_url.url, url_for('api.dfs_level_details'))
        try:
            response = requests.get(request_url)
            if response.status_code != 200:
                return jsonify(response.json()), response.status_code
            results = response.json()
            if results == None or len(results) == 0:
                continue
            for result in results:
                level_details[result] = results[result]
        except:
            return jsonify(ERROR_JSON), 400
    return jsonify(level_details), 200
31cf9fdf49620798e9411dda1eda99b95411858b
3,647,762
from copy import copy


def makeNonParameterized(p):
    """Return a new Pointset stripped of its parameterization.
    """
    if isinstance(p, Pointset) and p._isparameterized:
        return Pointset({'coordarray': copy(p.coordarray),
                         'coordnames': copy(p.coordnames),
                         'norm': p._normord,
                         'labels': copy(p.labels)})
    else:
        raise TypeError("Must provide a parameterized Pointset")
778eed55d3da10dcfb4681484cb31d6469009ae8
3,647,763