Columns: content (string, lengths 22 to 815k) · id (int64, 0 to 4.91M)
def plot_validation_curves(df, root_path, result_path, dataset='standardized_MNIST_dataset', scenario=1, dataset_dimension=28, architecture='FC', epochs=50): """ Plot for same scenario and data dimensions. Each curve represents the model trained with different amount of data. :param df: pandas dataframe from the json, already flatten :param root_path: path where to retrieve results :param result_path: path where to store the plots :param dataset: string with dataset name :param scenario: int, representing the scenario :param dataset_dimension: int, dimension of the mnist - dataset :param architecture: str, architecture type :param epochs: int, max number of epochs """ experiment_keys = {'scenario': scenario, 'dataset_name': dataset, 'dataset_dimensions': dataset_dimension, 'architecture': architecture, 'epochs': epochs} n_training_list = list(set(df['n_training'])) # for all possible n_training output_path = join(result_path, dataset, architecture, 'validation') os.makedirs(output_path, exist_ok=True) # we build the validation folder fig, ax = plt.subplots(figsize=(15, 5), nrows=1, ncols=2) plt.rc('xtick', labelsize=15) plt.rc('ytick', labelsize=15) for n_ in sorted(n_training_list): experiment_keys['n_training'] = n_ # we want to plot the very best results # across all learning rates and batch sizes if epochs == 50: experiment_keys['batch_size'] = 10 if n_ < 10 else 32 index_list = generate_bm(df, experiment_keys=experiment_keys)['id'].values best_id = search_best_id(root_path, index_list) df_hist = pd.read_csv(join(root_path, 'train_%i/history.csv' % best_id)) # we retrieve id ax[0].plot(df_hist['loss'], label='t: %i' % n_) ax[0].plot(df_hist['val_loss'], '--', label='v') # validation loss ax[0].set_xlabel('# epoch', fontsize='xx-large') ax[0].set_ylabel('cross entropy', fontsize='xx-large') ax[0].legend(fontsize='x-large') ax[0].set_ylim([0, 2]) ax[1].plot(df_hist['accuracy'], label='t: %i' %n_) ax[1].plot(df_hist['val_accuracy'], '--', label='v') # validation accuracy ax[1].set_xlabel('# epoch', fontsize='xx-large') ax[1].set_ylabel('accuracy', fontsize='xx-large') ax[1].legend(fontsize='x-large') fig.suptitle('Validation results', fontsize='xx-large') fig.tight_layout(rect=[0, 0.03, 1, 0.95]) plt.savefig(join(output_path, 'scenario_%i_dim_%i.pdf' % (scenario, dataset_dimension))) plt.close()
5,341,700
def _whctrs(anchor):
    """return width, height, x center, and y center for an anchor (window)."""
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    return w, h, x_ctr, y_ctr
5,341,701
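A minimal usage sketch for `_whctrs` above, assuming the usual `[x1, y1, x2, y2]` anchor convention from the anchor-generation code it mirrors; the values are illustrative only.

# Illustrative anchor in [x1, y1, x2, y2] form (hypothetical values).
anchor = [0, 0, 15, 15]
w, h, x_ctr, y_ctr = _whctrs(anchor)
# w = 16, h = 16, x_ctr = 7.5, y_ctr = 7.5
print(w, h, x_ctr, y_ctr)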
def filter_tiddlers(tiddlers, filters, environ=None):
    """
    Return a generator of tiddlers resulting from filtering the
    provided iterator of tiddlers by the provided filters.

    If filters is a string, it will be parsed for filters.
    """
    if isinstance(filters, basestring):
        filters, _ = parse_for_filters(filters, environ)
    return recursive_filter(filters, tiddlers)
5,341,702
def apply_move(board_state, move, side):
    """Returns a copy of the given board_state with the desired move applied.

    Args:
        board_state (3x3 tuple of int): The given board_state we want to apply the move to.
        move (int, int): The position we want to make the move in.
        side (int): The side we are making this move for, 1 for the first player, -1 for the second player.

    Returns:
        (3x3 tuple of int): A copy of the board_state with the given move applied for the given side.
    """
    move_x, move_y = move

    def get_tuples():
        for x in range(3):
            if move_x == x:
                temp = list(board_state[x])
                temp[move_y] = side
                yield tuple(temp)
            else:
                yield board_state[x]

    return tuple(get_tuples())
5,341,703
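A short usage sketch for `apply_move` above, assuming an empty 3x3 board encoded as nested tuples of zeros (the encoding is inferred from the docstring rather than stated in the source).

# Empty tic-tac-toe board: 0 = empty, 1 = first player, -1 = second player.
empty_board = ((0, 0, 0), (0, 0, 0), (0, 0, 0))

# First player plays the centre square.
new_board = apply_move(empty_board, (1, 1), 1)
# ((0, 0, 0), (0, 1, 0), (0, 0, 0)); the original board is left untouched.
print(new_board)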
def merge(dicts, overwrite=False, append=False, list_of_dicts=False): """ merge dicts, starting with dicts[1] into dicts[0] Parameters ---------- dicts : list[dict] list of dictionaries overwrite : bool if true allow overwriting of current data append : bool if true and items are both lists, then add them list_of_dicts: bool treat list of dicts as additional branches Examples -------- >>> from pprint import pprint >>> d1 = {1:{"a":"A"},2:{"b":"B"}} >>> d2 = {1:{"a":"A"},2:{"c":"C"}} >>> pprint(merge([d1,d2])) {1: {'a': 'A'}, 2: {'b': 'B', 'c': 'C'}} >>> d1 = {1:{"a":["A"]}} >>> d2 = {1:{"a":["D"]}} >>> pprint(merge([d1,d2],append=True)) {1: {'a': ['A', 'D']}} >>> d1 = {1:{"a":"A"},2:{"b":"B"}} >>> d2 = {1:{"a":"X"},2:{"c":"C"}} >>> merge([d1,d2],overwrite=False) Traceback (most recent call last): ... ValueError: different data already exists at "1.a": old: A, new: X >>> merge([{},{}],overwrite=False) {} >>> merge([{},{'a':1}],overwrite=False) {'a': 1} >>> pprint(merge([{},{'a':1},{'a':1},{'b':2}])) {'a': 1, 'b': 2} >>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}])) Traceback (most recent call last): ... ValueError: different data already exists at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}] >>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}], list_of_dicts=True)) Traceback (most recent call last): ... ValueError: list of dicts are of different lengths at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}] >>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}, {"e": 4}]}], list_of_dicts=True)) {'a': [{'b': 1, 'd': 3}, {'c': 2, 'e': 4}]} """ # noqa: E501 outdict = copy.deepcopy(dicts[0]) def single_merge(a, b): return _single_merge(a, b, overwrite=overwrite, append=append, list_of_dicts=list_of_dicts) reduce(single_merge, [outdict] + dicts[1:]) return outdict
5,341,704
def compute_delivery_period_index(frequency = None, delivery_begin_dt_local = None, delivery_end_date_local = None, tz_local = None, profile = None, ): """ Computes the delivery period index of a given contract. :param frequency: The type of delivery contract (year, month, etc.) :param delivery_begin_dt_local: The beginning datetime of the delivery :param delivery_end_date_local: The end date of the delivery :param local_tz: The local timezone :param profile: The profile of the contract :type frequency: string :type delivery_begin_dt_local: pd.Timestamp :type delivery_end_date_local: pd.Timestamp :type local_tz: pytz.tzfile :type profile: string :return: The delivery period index :rtype: int """ if ( pd.isnull(delivery_begin_dt_local) or frequency == global_var.contract_frequency_unknown or frequency == global_var.contract_frequency_spread ): return global_var.contract_delivery_period_index_unknown assert tz_local assert delivery_begin_dt_local.tz.zone == (tz_local if type(tz_local) == str else tz_local.zone ), (delivery_begin_dt_local.tz.zone, tz_local, ) if frequency == global_var.contract_frequency_half_hour: ans = int('{0:0>2}{1:0>2}{2:0>2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, delivery_begin_dt_local.hour, delivery_begin_dt_local.minute, )) elif frequency == global_var.contract_frequency_hour: ans = int('{0:0>2}{1:0>2}{2:0>2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, delivery_begin_dt_local.hour, )) elif frequency == global_var.contract_frequency_bloc: bloc_match = re.compile(global_var.contract_profile_bloc_pattern).match(profile) hour1 = int(bloc_match.group(1)) hour2 = int(bloc_match.group(2)) assert hour1 < hour2 ans = int('{0:0>2}{1:0>2}{2:0>2}{3:0>2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, hour1, hour2, )) elif frequency == global_var.contract_frequency_day: ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, )) elif frequency == global_var.contract_frequency_days: ans = int('{0:0>2}{1:0>2}{2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, int(( delivery_end_date_local - delivery_begin_dt_local.replace(hour = 0, minute = 0) ).total_seconds()/(3600*24)), )) elif frequency == global_var.contract_frequency_weekend: ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, )) elif frequency == global_var.contract_frequency_week: ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, )) elif frequency == global_var.contract_frequency_bow: ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, )) elif frequency == global_var.contract_frequency_month: ans = delivery_begin_dt_local.month elif frequency == global_var.contract_frequency_bom: ans = int('{0:0>2}{1:0>2}'.format(delivery_begin_dt_local.month, delivery_begin_dt_local.day, )) elif frequency == global_var.contract_frequency_quarter: ans = (delivery_begin_dt_local.month//3)+1 elif frequency == global_var.contract_frequency_season: if delivery_begin_dt_local.month == 4: ans = global_var.contract_delivery_period_index_summer elif delivery_begin_dt_local.month == 10: ans = global_var.contract_delivery_period_index_winter else: raise ValueError(frequency, delivery_begin_dt_local) elif frequency == global_var.contract_frequency_year: ans = global_var.contract_delivery_period_index_year else: raise NotImplementedError(frequency, delivery_begin_dt_local) return ans
5,341,705
def make_proxy(global_conf, address, allowed_request_methods="",
               suppress_http_headers=""):
    """
    Make a WSGI application that proxies to another address:

    ``address``
        the full URL ending with a trailing ``/``

    ``allowed_request_methods``:
        a space separated list of request methods (e.g., ``GET POST``)

    ``suppress_http_headers``
        a space separated list of http headers (lower case, without
        the leading ``http_``) that should not be passed on to target host
    """
    allowed_request_methods = aslist(allowed_request_methods)
    suppress_http_headers = aslist(suppress_http_headers)
    return Proxy(
        address,
        allowed_request_methods=allowed_request_methods,
        suppress_http_headers=suppress_http_headers)
5,341,706
def test():
    """Runs the unit tests without test coverage."""
    tests = unittest.TestLoader().discover('eachday/tests', pattern='test*.py')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1
5,341,707
async def save_training_result(r: dependency.TrainingResultHttpBody):
    """
    Saves the model training statistics to the database. This method is called
    only by registered dataset microservices.

    :param r: Training Result with updated fields sent by dataset microservice
    :return: {'status': 'success'} if successful update, else http error.
    """
    tr = get_training_result_by_training_id(r.training_id)
    tr.training_accuracy = r.results['training_accuracy']
    tr.validation_accuracy = r.results['validation_accuracy']
    tr.training_loss = r.results['training_loss']
    tr.validation_loss = r.results['validation_loss']
    tr.loss_config = r.results['loss_config']
    tr.optimizer_config = r.results['optimizer_config']
    tr.complete = True
    update_training_result_db(tr)
    return {
        'status': 'success',
        'detail': 'Training data successfully updated.'
    }
5,341,708
def sort2nd(xs):
    """Returns a list containing the same elements as xs, but sorted by
    their second elements."""
    # sorted() with a key returns a new list, matching the docstring; the old
    # Python 2 cmp-style call xs.sort(cmp2nd) mutated xs and no longer works
    # in Python 3.
    return sorted(xs, key=lambda item: item[1])
5,341,709
def is_one_of_type(val, types):
    """Returns whether the given value is one of the given types.

    :param val: The value to evaluate
    :param types: A sequence of types to check against.

    :return: Whether the given value is one of the given types.
    """
    result = False
    val_type = type(val)
    for tt in types:
        if val_type is tt:
            result = True
    return result
5,341,710
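A quick usage sketch for `is_one_of_type` above; note that it compares the exact type, so instances of subclasses do not match.

print(is_one_of_type(3, (int, str)))    # True
print(is_one_of_type(3.0, (int, str)))  # False
print(is_one_of_type(True, (int,)))     # False: type(True) is bool, not int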
def load_qadata(qa_dir): """ :param qa_dir: the file path of the provided QA dataset, eg: /data/preprocessed_data_10k/test; :return: the dictionary of the QA dataset, for instance QA_1_; """ print("begin_load_qadata") qa_set = {} # os.walk: generates the file names in a directory tree by walking the tree. # default: top, which is used to yield 3-tuples, # i.e., (dirpath, dirnames, filenames) for each directory rooted at directory for root, dirnames, filenames in os.walk(qa_dir): if(dirnames == []): qa_id = root[root.rfind("_")+1:] qa_dict ={} for filename in fnmatch.filter(filenames, '*.txt'): pattern = re.compile('QA_\d+_') # re.sub: substitute the pattern with "" in filename. keystr = re.sub(pattern,"", filename).replace(".txt","") qa_dict[keystr] = open(root+"/"+filename).readlines() qa_set[qa_id] = qa_dict print("load_qadata_success") return qa_set
5,341,711
def get_online(cheat_id):
    """Get the online count for a cheat
    ---
    consumes:
      - application/json
    parameters:
      - in: path
        name: cheat_id
        type: string
        description: ObjectId of the cheat, as a string
    responses:
      200:
        description: Successful request
      400:
        schema:
          $ref: '#/definitions/Error'
    """
    count = 0
    if cheat_id in online_counter_dict:
        for _ in online_counter_dict[cheat_id]:
            count += 1
    # A successful lookup answers with HTTP 200 (the original returned 400,
    # which the docstring reserves for errors).
    return make_response({'online': count}), 200
5,341,712
def grpc_client_connection(svc: str = None, target: str = None, session: Session = None) -> Channel: """ Create a new GRPC client connection from a service name, target endpoint and session @param svc: The name of the service to which we're trying to connect (ex. blue) @param target: The endpoint, associated with the service, to which the connection should direct its GRPC requests @param session: The session to associate with the connection. This object will be used to authenticate with the service """ # First, set the session and target to default values if they weren't provided session = session if session else Session() target = target if target else BLUE_ENDPOINT # Next, get the access token from the session and then embed # it into credentials we can send to the GRPC service token = session.access_token() credentials = composite_channel_credentials( ssl_channel_credentials(), access_token_call_credentials(token)) # Now, create a secure channel from the target and credentials if svc: conn = secure_channel( target = target, credentials = credentials, options = (('grpc.enable_http_proxy', 0),), interceptors = [ _header_adder_interceptor("service-name", svc), _header_adder_interceptor("x-agent", "blue-sdk-python")]) else: conn = secure_channel(target = target, credentials = credentials) # Return the connection return conn
5,341,713
def test_product_category_relation(
    db_session: Session,
    example_categories: List[Category],
    example_products_without_category: List[Product]
):
    """Check whether orm adds products along with category"""
    example_categories[0].products = example_products_without_category
    db_session.add(example_categories[0])
    db_session.commit()

    categories: List[Category] = db_session.query(Category).all()
    assert categories[0].products == example_products_without_category
5,341,714
def aqi(pm25):
    """AQI Calculator

    Calculates AQI from PM2.5 using EPA formula and breakpoints from:
    https://www.airnow.gov/sites/default/files/2018-05/aqi-technical
    -assistance-document-may2016.pdf

    Args:
        - pm25 (int or float): PM2.5 in ug/m3
    """
    if pm25 < 0:
        raise ValueError("PM2.5 must be positive.")
    else:
        # round PM2.5 to nearest tenth for categorization
        pm25 = np.round(pm25, 1)

    green = {"aqi_low": 0, "aqi_hi": 50, "pm_low": 0.0, "pm_hi": 12.0}
    yellow = {"aqi_low": 51, "aqi_hi": 100, "pm_low": 12.1, "pm_hi": 35.4}
    orange = {"aqi_low": 101, "aqi_hi": 150, "pm_low": 35.5, "pm_hi": 55.4}
    red = {"aqi_low": 151, "aqi_hi": 200, "pm_low": 55.5, "pm_hi": 150.4}
    purple = {"aqi_low": 201, "aqi_hi": 300, "pm_low": 150.5, "pm_hi": 250.4}
    maroon = {"aqi_low": 301, "aqi_hi": 500, "pm_low": 250.5, "pm_hi": 500.4}

    colors = [green, yellow, orange, red, purple, maroon]
    categorized = False

    # Assign measurement to AQI category.
    for color in colors:
        if pm25 >= color["pm_low"] and pm25 <= color["pm_hi"]:
            cat = color
            categorized = True
            break

    # Put in highest category if still not assigned.
    if not categorized:
        cat = colors[-1]

    # EPA formula for AQI.
    aqi_num = (cat["aqi_hi"] - cat["aqi_low"]) / \
              (cat["pm_hi"] - cat["pm_low"]) * \
              (pm25 - cat["pm_low"]) + cat["aqi_low"]

    return aqi_num
5,341,715
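A worked example of the EPA linear interpolation that `aqi` above implements, using an assumed PM2.5 reading of 35.9 ug/m3, which falls in the orange (35.5 to 55.4) breakpoint band; it assumes `aqi` is in scope with numpy available as `np`, as in its source module.

pm25 = 35.9  # hypothetical sensor reading in ug/m3
# Orange band: AQI 101-150 maps to PM2.5 35.5-55.4, so
# aqi = (150 - 101) / (55.4 - 35.5) * (35.9 - 35.5) + 101 ≈ 102.0
print(round(aqi(pm25)))  # -> 102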
def projectionManip(*args, **kwargs): """ Various commands to set the manipulator to interesting positions. In query mode, return type is based on queried flag. Flags: - fitBBox : fb (bool) [create] Fit the projection manipulator size and position to the shading group bounding box. The orientation is not modified. - projType : pt (int) [create] Set the projection type to the given value. Projection type values are: 1 = planar.2 = spherical.3 = cylindrical.4 = ball.5 = cubic.6 = triplanar.7 = concentric.8 = camera. - switchType : st (bool) [create] Loop over the allowed types. If the hardware shading is on, it loops over the hardware shadeable types (planar, cylindrical, spherical), otherwise, it loops over all the types. If there is no given value, it loops over the different projection types. Flag can have multiple arguments, passed either as a tuple or a list. Derived from mel command `maya.cmds.projectionManip` """ pass
5,341,716
def get_fighters(url):
    """
    Scrape fighter data
    """
    response = simple_get(url)

    if response is not None:
        html = BeautifulSoup(response, 'html.parser')
        html_tr = html.find_all('tr')
        print(len(html_tr))
    else:
        # Raise an exception if we failed to get any data from the url
        raise Exception('Error retrieving contents at {}'.format(url))

    print(html_tr[3])
    print(type(html_tr[3]))
5,341,717
def require_lock(model, lock='ACCESS EXCLUSIVE'):
    """
    Decorator for PostgreSQL's table-level lock functionality

    Example:
        @transaction.commit_on_success
        @require_lock(MyModel, 'ACCESS EXCLUSIVE')
        def myview(request)
            ...

    PostgreSQL's LOCK Documentation:
    http://www.postgresql.org/docs/8.3/interactive/sql-lock.html
    """
    def require_lock_decorator(view_func):
        def wrapper(*args, **kwargs):
            if lock not in LOCK_MODES:
                # Interpolate the offending lock mode into the error message.
                raise ValueError('%s is not a PostgreSQL supported lock mode.' % lock)
            from django.db import connection
            cursor = connection.cursor()
            cursor.execute(
                'LOCK TABLE %s IN %s MODE' % (model._meta.db_table, lock)
            )
            return view_func(*args, **kwargs)
        return wrapper
    return require_lock_decorator
5,341,718
def welcome():
    """List all available api routes."""
    # Set the app.route() decorator for the "/api/v1.0/precipitation" route
    return (
        f"Available Routes:<br/>"
        f"/api/v1.0/names<br/>"
        f"/api/v1.0/precipitation"
    )
5,341,719
def train_filter_keras_model(config: Config) -> None: """Train a Filter model""" train_seq = FilterModelSequence(config, "training") valid_seq = FilterModelSequence(config, "validation") product_input_layer = Input(shape=(config["fingerprint_len"],)) product_dense_layer = Dense(config["model"]["hidden_nodes"], activation="elu")( product_input_layer ) product_droput_layer = Dropout(config["model"]["drop_out"])(product_dense_layer) reaction_input_layer = Input(shape=(config["fingerprint_len"],)) reaction_dense_layer = Dense(config["model"]["hidden_nodes"], activation="elu")( reaction_input_layer ) cosine_layer = Dot(-1, normalize=True)([product_droput_layer, reaction_dense_layer]) output_layer = Dense(1, activation="sigmoid")(cosine_layer) model = Model( inputs=[product_input_layer, reaction_input_layer], outputs=output_layer ) _train_keras_model( model, train_seq, valid_seq, "binary_crossentropy", ["accuracy"], config )
5,341,720
def pbootstrap(data, R, fun, initval=None, ncpus=1):
    """
    :func pbootstrap: Calls boot method for R iterations in parallel and gets
                      estimates of y-intercept and slope
    :param data: data - contains dataset
    :param R: number of iterations
    :param fun: function to get estimate of y-intercept and slope
    :param initval: initial guess of y-intercept and slope can be passed - optional
    :param ncpus: number of physical cores to run the pbootstrap method - optional
    :return: estimates of y-intercept and slope
    """
    N = data.shape[0]
    thetas = Parallel(ncpus)(delayed(boot)(data, N, fun, initval) for _ in range(R))
    return np.asarray(thetas)
5,341,721
def _el_orb(string):
    """Parse the element and orbital argument strings.

    The presence of an element without any orbitals means that we want to
    plot all of its orbitals.

    Args:
        string (str): The element and orbitals as a string, in the form
            ``"C.s.p,O"``.

    Returns:
        dict: The elements and orbitals as a :obj:`dict`. For example::

            {'Bi': ['s', 'px', 'py', 'd']}.

        If an element symbol is included with an empty list, then all
        orbitals for that species are considered.
    """
    el_orbs = {}
    for split in string.split(','):
        orbs = split.split('.')
        orbs = [orbs[0], 's', 'p', 'd', 'f'] if len(orbs) == 1 else orbs
        el_orbs[orbs.pop(0)] = orbs
    return el_orbs
5,341,722
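A short illustration of `_el_orb` above on the string format its docstring describes; the element selections are hypothetical.

print(_el_orb("C.s.p,O"))
# {'C': ['s', 'p'], 'O': ['s', 'p', 'd', 'f']}
# 'C.s.p' keeps only the listed orbitals, while a bare 'O' expands to all of s, p, d, f.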
def _prettify_xml(elem, level=0):
    """Adds indents.

    Code of this method was copied from
    http://effbot.org/zone/element-lib.htm#prettyprint
    """
    i = "\n" + level * "  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            _prettify_xml(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
5,341,723
def test_inverse_force():
    """Testing that we can convert amounts of substance."""
    E = Q_(1, "eV/angstrom")
    E2 = E.to("kcal/mol/angstrom")
    assert f"{E:~} = {E2:~.4}" == "1 eV / Å = 23.06 kcal / mol / Å"
5,341,724
def create_folders(path):
    """
    create folders with class names 0 - 9
    :param path: global path of the parent folder [str]
    :return: None
    """
    for i in range(10):  # 0 - 9
        if not os.path.exists(os.path.join(path, str(i))):
            os.mkdir(os.path.join(path, str(i)))
5,341,725
def create_LOFAR_configuration(antfile: str, meta: dict = None) -> Configuration: """ Define from the LOFAR configuration file :param antfile: :param meta: :return: Configuration """ antxyz = numpy.genfromtxt(antfile, skip_header=2, usecols=[1, 2, 3], delimiter=",") nants = antxyz.shape[0] assert antxyz.shape[1] == 3, "Antenna array has wrong shape %s" % antxyz.shape anames = numpy.genfromtxt(antfile, dtype='str', skip_header=2, usecols=[0], delimiter=",") mounts = numpy.repeat('XY', nants) location = EarthLocation(x=[3826923.9] * u.m, y=[460915.1] * u.m, z=[5064643.2] * u.m) fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz, frame='global', diameter=35.0) return fc
5,341,726
def fmla_for_filt(filt):
    """
    transform a set of column filters from a dictionary like
        { 'varX':['lv11','lvl2'],...}
    into an R selector expression like
        'varX %in% c("lvl1","lvl2")' & ...
    """
    return ' & '.join([
        '{var} %in% c({lvls})'.format(
            var=k,
            lvls=','.join(map(lambda x: '"%s"' % x, v)) if type(v) == list else '"%s"' % v
        )
        for k, v in filt.items()
    ])
5,341,727
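A small usage sketch of `fmla_for_filt` above with a hypothetical filter dictionary; list values become an `%in%` clause with several levels, scalar values a single quoted level.

filt = {"species": ["setosa", "virginica"], "site": "A"}
print(fmla_for_filt(filt))
# species %in% c("setosa","virginica") & site %in% c("A")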
def sfen_board(ban):
    """Convert ban (nrow*nrow array) to sfen string
    """
    s = ''
    num = 0
    for iy in range(nrow):
        for ix in range(nrow):
            i = iy*nrow + ix
            if ban[i]:
                if num:
                    s += str(num)
                    num = 0
                s += ban[i]
            else:
                num += 1
        if iy < 8:
            if num:
                s += str(num)
                num = 0
            s += '/'
    return s
5,341,728
def join_simple_tables(G_df_dict, G_data_info, G_hist, is_train, remain_time):
    """ Build G_df_dict['BIG'] """
    start = time.time()
    if is_train:
        if 'relations' in G_data_info:
            G_hist['join_simple_tables'] = [x for x in G_data_info['relations']
                                            if x['type'] == '1-1' and x['related_to_main_table'] == 'true']
        else:
            G_hist['join_simple_tables'] = []

    time_budget = G_data_info['time_budget']
    Id = G_data_info['target_id']
    target = G_data_info['target_label']
    main_table_name = G_data_info['target_entity']

    log('[+] join simple tables')
    G_df_dict['BIG'] = G_df_dict[main_table_name]

    # If the data is a time series, sort the BIG table by the time column.
    if G_data_info['target_time'] != '':
        # sort_values returns a new frame, so assign it back (the original
        # call discarded the sorted result).
        G_df_dict['BIG'] = G_df_dict['BIG'].sort_values(by=G_data_info['target_time'])

    for relation in G_hist['join_simple_tables']:
        left_table_name = relation['left_entity']
        right_table_name = relation['right_entity']
        left_on = relation['left_on']
        right_on = relation['right_on']
        if main_table_name == left_table_name:
            merge_table_name = right_table_name
            skip_name = right_on
        else:
            merge_table_name = left_table_name
            left_on, right_on = right_on, left_on
            skip_name = left_on
        log(merge_table_name)
        merge_table = G_df_dict[merge_table_name].copy()
        # Prefix the merged table's columns with its name, except the join keys.
        merge_table.columns = [x if x in skip_name else merge_table_name + "_" + x
                               for x in merge_table.columns]
        G_df_dict['BIG'] = G_df_dict['BIG'].merge(merge_table, left_on=left_on,
                                                  right_on=right_on, how='left')
        log(f"G_df_dict['BIG'].shape: {G_df_dict['BIG'].shape}")

    end = time.time()
    remain_time -= (end - start)
    log("remain_time: {} s".format(remain_time))
    return remain_time
5,341,729
async def create_upload_file(file: UploadFile = File(default="")):
    """
    Takes a csv file uploaded as multipart form data, downloads it.
    """
    newpath = os.path.join(DATA_DIR_NAME, file.filename)
    with open(newpath, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
5,341,730
def make_model(model_name: str, migration): """ Create model.\n :param model_name: model name in singular form.\n :param migration: if you would like to create migration as well\n :return: None """ if migration: call(['python', 'db.py', 'make:migration', model_name, '-p', 'migrations', '--table', model_name, '--create']) if not os.path.isfile(f'models/{model_name.capitalize()}.py'): with open(f'models/{model_name.capitalize()}.py', 'w') as f: f.write(f''' from config.settings import Model class {model_name.capitalize()}(Model): pass ''') with open(f'models/__init__.py', 'a+') as f: f.write( f'from models.{model_name.capitalize()} import {model_name.capitalize()}\n') click.echo('\033[92mModel Created Successfully!\033[0m') else: click.echo('\033[95mModel Already Exists!\033[0m')
5,341,731
def create_epochs(data, events_onsets, sampling_rate=1000, duration=1, onset=0, index=None): """ Epoching a dataframe. Parameters ---------- data : pandas.DataFrame Data*time. events_onsets : list A list of event onsets indices. sampling_rate : int Sampling rate (samples/second). duration : int or list Duration(s) of each epoch(s) (in seconds). onset : int Epoch onset(s) relative to events_onsets (in seconds). index : list Events names in order that will be used as index. Must contains uniques names. If not provided, will be replaced by event number. Returns ---------- epochs : dict dict containing all epochs. Example ---------- >>> import neurokit as nk >>> epochs = nk.create_epochs(data, events_onsets) Notes ---------- *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - numpy """ # Convert ints to arrays if needed if isinstance(duration, list) or isinstance(duration, np.ndarray): duration = np.array(duration) else: duration = np.array([duration]*len(events_onsets)) if isinstance(onset, list) or isinstance(onset, np.ndarray): onset = np.array(onset) else: onset = np.array([onset]*len(events_onsets)) if isinstance(data, list) or isinstance(data, np.ndarray) or isinstance(data, pd.Series): data = pd.DataFrame({"Signal": list(data)}) # Store durations duration_in_s = duration.copy() onset_in_s = onset.copy() # Convert to timepoints duration = duration*sampling_rate onset = onset*sampling_rate # Create the index if index is None: index = list(range(len(events_onsets))) else: if len(list(set(index))) != len(index): print("NeuroKit Warning: create_epochs(): events_names does not contain uniques names, replacing them by numbers.") index = list(range(len(events_onsets))) else: index = list(index) # Create epochs epochs = {} for event, event_onset in enumerate(events_onsets): epoch_onset = int(event_onset + onset[event]) epoch_end = int(event_onset+duration[event]+1) epoch = data[epoch_onset:epoch_end].copy() epoch.index = np.linspace(start=onset_in_s[event], stop=duration_in_s[event], num=len(epoch), endpoint=True) relative_time = np.linspace(start=onset[event], stop=duration[event], num=len(epoch), endpoint=True).astype(int).tolist() absolute_time = np.linspace(start=epoch_onset, stop=epoch_end, num=len(epoch), endpoint=True).astype(int).tolist() epoch["Epoch_Relative_Time"] = relative_time epoch["Epoch_Absolute_Time"] = absolute_time epochs[index[event]] = epoch return(epochs)
5,341,732
def is_equal_limit_site(
    site: SiteToUse, limit_site: SiteToUse, site_class: Type[Site]
) -> bool:
    """Check if site is a limit site."""
    # The function returns a boolean, so the return annotation is bool rather than None.
    if site_class == Site:
        return site.point.x == limit_site.x and site.point.y == limit_site.y
    elif site_class == WeightedSite:
        return (
            site.point.x == limit_site[0].x
            and site.point.y == limit_site[0].y
            and site.weight == limit_site[1]
        )
5,341,733
def get_header(filename):
    """retrieves the header of an image

    Args:
        filename (str): file name

    Returns:
        (str): header
    """
    im = fabio.open(filename)
    return im.header
5,341,734
def gaussian_ll_pdf(x, mu, sigma):
    """Evaluates the (unnormalized) log of the normal PDF at point x

    Parameters
    ----------
    x : float or array-like
        point at which to evaluate the log pdf
    mu : float or array-like
        mean of the normal on a linear scale
    sigma : float or array-like
        standard deviation of the normal on a linear scale
    """
    log_pdf = -0.5*(x - mu)**2.0/sigma**2.0  # - np.log(sigma) - 0.5*np.log(2.0*np.pi)
    return log_pdf
5,341,735
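A quick numerical check of `gaussian_ll_pdf` above: because the normalization terms are commented out, the value at x = mu is exactly 0, and one standard deviation away it is -0.5.

print(gaussian_ll_pdf(0.0, 0.0, 1.0))   # 0.0   (at the mean)
print(gaussian_ll_pdf(1.0, 0.0, 1.0))   # -0.5  (one sigma away)
print(gaussian_ll_pdf(2.0, 0.0, 1.0))   # -2.0  (two sigmas away)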
def _show_traceback(method):
    """decorator for showing tracebacks in IPython"""
    def m(self, *args, **kwargs):
        try:
            return(method(self, *args, **kwargs))
        except Exception as e:
            ip = get_ipython()
            if ip is None:
                self.log.warn("Exception in widget method %s: %s", method, e, exc_info=True)
            else:
                ip.showtraceback()
    return m
5,341,736
def ecdf(data):
    """Compute ECDF for a one-dimensional array of measurements."""
    # Number of data points
    n = len(data)

    # x-data for the ECDF
    x = np.sort(data)

    # y-data for the ECDF
    y = np.arange(1, len(x)+1) / n

    return x, y
5,341,737
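A tiny usage sketch of `ecdf` above on a made-up sample; each sorted observation is paired with the fraction of the data at or below it.

import numpy as np

data = np.array([3.0, 1.0, 2.0, 2.0])
x, y = ecdf(data)
# x -> [1. 2. 2. 3.], y -> [0.25 0.5 0.75 1.]
print(x, y)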
def import_module_part(request, pk): """Module part import. Use an .xlsx file to submit grades to a module part On GET the user is presented with a file upload form. On POST, the submitted .xlsx file is processed by the system, registering Grade object for each grade in the excel file. It dynamically detects the tests that are submitted (by exact name match or database ID), and omits extra columns silently. Also, lines that do not have a filled in student number are ignored. Students that are not declared as part of the module (def:import_student_to_module) raise an import error. :param request: Django request :param pk: Module part that grades should be submitted to :return: A redirect to the Grades course view on success. Otherwise a 404 (module does not exist), 403 (no permissions) or 400 (bad excel file or other import error) """ module_part = get_object_or_404(ModulePart, pk=pk) module_edition = get_object_or_404(ModuleEdition, modulepart=module_part) person = Person.objects.filter(user=request.user).filter( Q(coordinator__module_edition__modulepart=module_part) | Q(teacher__module_part=module_part) ).first() if not ModuleEdition.objects.filter(modulepart=module_part): raise Http404('Module does not exist.') if not (is_coordinator_or_assistant_of_module(person, module_edition) or is_coordinator_or_teacher_of_module_part(person, module_part)): raise PermissionDenied('You are not allowed to do this.') if request.method == "POST": form = GradeUploadForm(request.POST, request.FILES) if form.is_valid(): title_row = form.cleaned_data.get('title_row') - 1 # Check if /any/ tests and/or grades are imported. any_tests = False # List of all tests that are imported. all_tests = [] sheet = request.FILES['file'].get_book_dict() for table in sheet: # Check if the sheet has enough rows if title_row >= len(sheet[table]): return bad_request(request, {'message': 'The file that was uploaded was not recognised as a grade' ' excel file. Are you sure the file is an .xlsx file, and' ' that all fields are present? Otherwise, download a new' ' gradesheet and try using that instead.'}) test_rows = dict() university_number_field = None # Detect university_number and test columns for title_index in range(0, len(sheet[table][title_row])): # This is the university number column if ('number' in str(sheet[table][title_row][title_index]).lower()) or \ ('nummer' in str(sheet[table][title_row][title_index]).lower()): university_number_field = title_index else: # Attempt to find a Test # search by ID try: test = Test.objects.filter( pk=sheet[table][title_row][title_index]) if test and test.filter(module_part=module_part): test_rows[title_index] = sheet[table][title_row][title_index] # pk of Test any_tests = True except (ValueError, TypeError): pass # Not an int. # search by name if Test.objects.filter(module_part=module_part).filter( name=sheet[table][title_row][title_index]): test_rows[title_index] = Test.objects.filter( name=sheet[table][title_row][title_index] ).filter(module_part=module_part)[0].pk # pk of Test any_tests = True # Attempt to ignore test altogether. else: pass if university_number_field is None: continue # Ignore this sheet if len(test_rows.keys()) == 0: continue # Ignore this sheet # The current user's Person is the corrector of the grades. 
teacher = Person.objects.filter(user=request.user).first() grades = [] # Retrieve Test object beforehand to validate permissions on tests and speed up Grade creation tests = dict() for test_column in test_rows.keys(): tests[test_column] = Test.objects.get(pk=test_rows[test_column]) [all_tests.append(test) for test in tests.values() if test] # Check excel file for invalid students invalid_students = [] for row in sheet[table][(title_row + 1):]: if not Studying.objects.filter(person__university_number__contains=row[university_number_field]).filter( module_edition=module_edition): invalid_students.append(row[university_number_field]) # Check for invalid student numbers in the university_number column, but ignore empty fields. if [student for student in invalid_students if student is not '']: return bad_request(request, {'message': 'Students {} are not enrolled in this module.\n ' 'Enroll these students first before retrying' .format(invalid_students)}) # Make Grades for row in sheet[table][(title_row + 1):]: # Walk horizontally over table student = Person.objects.filter(university_number__contains=row[university_number_field]).first() # check if this is not an empty line, else continue. if student: for test_column in test_rows.keys(): try: grades.append(make_grade( student=student, corrector=teacher, test=tests[test_column], grade=row[test_column] )) except GradeException as e: # Called for either: bad grade, grade out of bounds return bad_request(request, {'message': e}) save_grades(grades) # Bulk-save grades. Also prevents a partial import of the sheet. # Check if anything was imported. if not any_tests: return bad_request(request, {'message': 'There were no tests recognized to import.'}) return render(request=request, template_name='importer/successfully_imported.html', context={'tests': all_tests}) else: return bad_request(request, {'message': 'The file uploaded was not recognised as a grade excel file.' ' Are you sure the file is an .xlsx file? Otherwise, download a new' ' gradesheet and try using that instead'}) else: # GET request form = GradeUploadForm() return render(request, 'importer/importmodulepart.html', {'form': form, 'pk': pk, 'module_part': module_part})
5,341,738
def ordered_pair(x: complex) -> Tuple[float, float]:
    """
    Returns the tuple (a, b), like the ordered pair in the complex plane
    """
    return (x.real, x.imag)
5,341,739
def find_fits_file(plate_dir_list, fits_partial_path):
    """
    Returns a path
    :rtype : basestring
    """
    for plate_dir in plate_dir_list:
        fits_path = os.path.join(plate_dir, fits_partial_path)
        if os.path.exists(fits_path):
            return fits_path
    return None
5,341,740
def HornFromDL(owlGraph, safety=DATALOG_SAFETY_NONE, derivedPreds=[], complSkip=[]): """ Takes an OWL RDF graph, an indication of what level of ruleset safety (see: http://code.google.com/p/fuxi/wiki/FuXiUserManual#Rule_Safety) to apply, and a list of derived predicates and returns a Ruleset instance comprised of the rules extracted from the OWL RDF graph (using a variation of the OWL 2 RL transformation) """ from FuXi.Rete.RuleStore import SetupRuleStore ruleStore, ruleGraph, network = SetupRuleStore(makeNetwork=True) return network.setupDescriptionLogicProgramming( owlGraph, derivedPreds=derivedPreds, expanded=complSkip, addPDSemantics=False, constructNetwork=False, safety=safety)
5,341,741
def _assembleMatrix(data, indices, indptr, shape): """ Generic assemble matrix function to create a CSR matrix Parameters ---------- data : array Data values for matrix indices : int array CSR type indices indptr : int array Row pointer shape : tuple-like Actual shape of matrix Returns ------- M : scipy csr sparse matrix The assembled matrix """ M = sparse.csr_matrix((data, indices, indptr), shape) return M
5,341,742
def quantize_model(): """ Create the Quantization Simulation and finetune the model. :return: """ tf.compat.v1.reset_default_graph() # load graph sess = graph_saver.load_model_from_meta('models/mnist_save.meta', 'models/mnist_save') # Create quantsim model to quantize the network using the default 8 bit params/activations sim = quantsim.QuantizationSimModel(sess, starting_op_names=['reshape_input'], output_op_names=['dense_1/BiasAdd'], quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='../../../TrainingExtensions/common/src/python/aimet_common/' 'quantsim_config/default_config.json') # Compute encodings sim.compute_encodings(pass_calibration_data, forward_pass_callback_args=None) # Do some finetuning # User action required # The following line of code illustrates that the model is getting finetuned. # Replace the following train() function with your pipeline's train() function. train(sim)
5,341,743
def mock_state_store(decoy: Decoy) -> StateStore:
    """Get a mocked out StateStore."""
    return decoy.mock(cls=StateStore)
5,341,744
def beam_search_runner_range(output_series: str, decoder: BeamSearchDecoder, max_rank: int = None, postprocess: Callable[ [List[str]], List[str]]=None ) -> List[BeamSearchRunner]: """Return beam search runners for a range of ranks from 1 to max_rank. This means there is max_rank output series where the n-th series contains the n-th best hypothesis from the beam search. Args: output_series: Prefix of output series. decoder: Beam search decoder shared by all runners. max_rank: Maximum rank of the hypotheses. postprocess: Series-level postprocess applied on output. Returns: List of beam search runners getting hypotheses with rank from 1 to max_rank. """ check_argument_types() if max_rank is None: max_rank = decoder.beam_size if max_rank > decoder.beam_size: raise ValueError( ("The maximum rank ({}) cannot be " "bigger than beam size {}.").format( max_rank, decoder.beam_size)) return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r), decoder, r, postprocess) for r in range(1, max_rank + 1)]
5,341,745
def clone_provenance_history(provenance_history_data, ad): """ For a single input's provenance history, copy it into the output `AstroData` object as appropriate. This takes a dictionary with a source filename, md5 and both it's original provenance and provenance_history information. It duplicates the provenance data into the outgoing `AstroData` ad object. Parameters ---------- provenance_history_data : pointer to the `AstroData` table with the history information. *Note* this may be the output `~astrodata.AstroData` as well, so we need to handle that. ad : `astrodata.AstroData` Outgoing `~astrodata.AstroData` object to add provenance history data to. """ phd = [(prov_hist[0], prov_hist[1], prov_hist[2], prov_hist[3]) for prov_hist in provenance_history_data] for ph in phd: add_provenance_history(ad, ph[0], ph[1], ph[2], ph[3])
5,341,746
def read_variants( pipeline, # type: beam.Pipeline all_patterns, # type: List[str] pipeline_mode, # type: PipelineModes allow_malformed_records, # type: bool representative_header_lines=None, # type: List[str] pre_infer_headers=False, # type: bool sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH, # type: int use_1_based_coordinate=False # type: bool ): # type: (...) -> pvalue.PCollection """Returns a PCollection of Variants by reading VCFs.""" compression_type = get_compression_type(all_patterns) if compression_type == filesystem.CompressionTypes.GZIP: splittable_bgzf = _get_splittable_bgzf(all_patterns) if splittable_bgzf: return (pipeline | 'ReadVariants' >> vcfio.ReadFromBGZF(splittable_bgzf, representative_header_lines, allow_malformed_records, pre_infer_headers, sample_name_encoding, use_1_based_coordinate)) if pipeline_mode == PipelineModes.LARGE: variants = (pipeline | 'InputFilePattern' >> beam.Create(all_patterns) | 'ReadAllFromVcf' >> vcfio.ReadAllFromVcf( representative_header_lines=representative_header_lines, compression_type=compression_type, allow_malformed_records=allow_malformed_records, pre_infer_headers=pre_infer_headers, sample_name_encoding=sample_name_encoding, use_1_based_coordinate=use_1_based_coordinate)) else: variants = pipeline | 'ReadFromVcf' >> vcfio.ReadFromVcf( all_patterns[0], representative_header_lines=representative_header_lines, compression_type=compression_type, allow_malformed_records=allow_malformed_records, pre_infer_headers=pre_infer_headers, sample_name_encoding=sample_name_encoding, use_1_based_coordinate=use_1_based_coordinate) if compression_type == filesystem.CompressionTypes.GZIP: variants |= 'FusionBreak' >> fusion_break.FusionBreak() return variants
5,341,747
def carla_location_to_numpy_vector(carla_location):
    """
    Convert a carla location to a icv vector3

    Considers the conversion from left-handed system (unreal) to
    right-handed system (icv)

    :param carla_location: the carla location
    :type carla_location: carla.Location
    :return: a numpy.array with 3 elements
    :rtype: numpy.array
    """
    return numpy.array([
        carla_location.x,
        -carla_location.y,
        carla_location.z
    ])
5,341,748
def test_round_trip_masked_table_default(tmpdir): """Test round-trip of MaskedColumn through HDF5 using default serialization that writes a separate mask column. Note: >>> simple_table(masked=True) <Table masked=True length=3> a b c int64 float64 str1 ----- ------- ---- -- 1.0 c 2 2.0 -- 3 -- e """ filename = str(tmpdir.join('test.h5')) t = simple_table(masked=True) # int, float, and str cols with one masked element t['c'] = [b'c', b'd', b'e'] t['c'].mask[1] = True t.write(filename, format='hdf5', path='root', serialize_meta=True) t2 = Table.read(filename) assert t2.masked is False assert t2.colnames == t.colnames for name in t2.colnames: assert np.all(t2[name].mask == t[name].mask) assert np.all(t2[name] == t[name]) # Data under the mask round-trips also (unmask data to show this). t[name].mask = False t2[name].mask = False assert np.all(t2[name] == t[name])
5,341,749
def main():
    """docstring for main"""
    import argparse

    distribution = pkg_resources.get_distribution('tldextract')

    parser = argparse.ArgumentParser(
        version='%(prog)s ' + distribution.version,
        description='Parse hostname from a url or fqdn')
    parser.add_argument('input', metavar='fqdn|url',
                        type=unicode, nargs='*', help='fqdn or url')
    parser.add_argument('-u', '--update', default=False, action='store_true',
                        help='force fetch the latest TLD definitions')
    parser.add_argument('-c', '--cache_file',
                        help='use an alternate TLD definition file')
    args = parser.parse_args()

    if args.cache_file:
        TLD_EXTRACTOR.cache_file = args.cache_file

    if args.update:
        try:
            TLD_EXTRACTOR.update(True)
        except Exception as exc:
            # str(exc) so the message can be concatenated; compare the length
            # with == rather than identity.
            sys.stderr.write(str(exc) + "\n")
            exit(2)
    elif len(args.input) == 0:
        parser.print_usage()
        exit(1)

    for i in args.input:
        print(' '.join(extract(i)))
5,341,750
async def test_abort_on_connection_error(
    mock_get_cases: MagicMock, hass: HomeAssistant
) -> None:
    """Test we abort on connection error."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert "type" in result
    assert result["type"] == "abort"
    assert "reason" in result
    assert result["reason"] == "cannot_connect"
5,341,751
def prev_cur_next(lst):
    """
    Returns list of tuples (prev, cur, next) for each item in list, where
    "prev" and "next" are the previous and next items in the list,
    respectively, or None if they do not exist.
    """
    # zip() returns an iterator in Python 3, so materialise it as a list;
    # padding the "next" column with None also covers the final element
    # without indexing lst[-2], which would fail on single-item lists.
    return list(zip([None] + lst[:-1], lst, lst[1:] + [None]))
5,341,752
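A small usage sketch for `prev_cur_next` above, walking a short list together with its neighbours.

for prev, cur, nxt in prev_cur_next([1, 2, 3]):
    print(prev, cur, nxt)
# None 1 2
# 1 2 3
# 2 3 None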
def vet_input_path(filename): """ Check if the given input file exists. Returns a pathlib.Path object if everything is OK, raises InputFileException if not. """ putative_path = pathlib.Path(filename) if putative_path.exists(): if not putative_path.is_file(): msg = ('A given input file is not infact a file. ' + \ 'You input {}.'.format(putative_path)) raise InputFileException(msg) else: msg = ('Could not find a specified input file. You input {}.'.format( putative_path)) raise InputFileException(msg) return putative_path
5,341,753
def check_for_updates(repo: str = REPO) -> str: """ Check for updates to the current version. """ message = "" url = f"https://api.github.com/repos/{repo}/releases/latest" response = requests.get(url) if response.status_code != 200: raise RuntimeError( f"Failed to get commit count. Status code: {response.status_code}" ) data = response.json() latest_version = data["name"] # returns "vx.x.x" current_version = f"v{_version.__version__}" # returns "vx.x.x" if latest_version != current_version: message = f"New version available: {latest_version}.\n\n" else: message = "No updates available.\n\n" master = get_status(current_version, "master") dev = get_status(current_version, "dev") for branch in ["master", "dev"]: name = branch.capitalize() if branch == "master": status, ahead_by, behind_by = master else: status, ahead_by, behind_by = dev if status == "behind": message += f"{name} is {status} by {behind_by} commits.\n" elif status == "ahead": message += f"{name} is {status} by {ahead_by} commits.\n" else: message += f"{name} is up to date.\n" return message
5,341,754
def _process_image(record, training):
    """Decodes the image and performs data augmentation if training."""
    image = tf.io.decode_raw(record, tf.uint8)
    image = tf.cast(image, tf.float32)
    image = tf.reshape(image, [32, 32, 3])
    image = image * (1. / 255) - 0.5
    if training:
        padding = 4
        image = tf.image.resize_with_crop_or_pad(image, 32 + padding, 32 + padding)
        image = tf.image.random_crop(image, [32, 32, 3])
        image = tf.image.random_flip_left_right(image)
    return image
5,341,755
def svn_diff_fns2_invoke_token_discard(*args):
    """svn_diff_fns2_invoke_token_discard(svn_diff_fns2_t _obj, void diff_baton, void token)"""
    return _diff.svn_diff_fns2_invoke_token_discard(*args)
5,341,756
def process_bb(model, I, bounding_boxes, image_size=(412, 412)):
    """
    :param model: A binary model to create the bounding boxes
    :param I: PIL image
    :param bounding_boxes: Bounding boxes containing regions of interest
    :param image_size: Choose the size of the patches
    :return: Patches with the class of the ROIS
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    patches = []
    normalization = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    for (x, y, w, h) in bounding_boxes:
        patch = np.array(I.crop((x, y, x + w, y + h)))
        patch = cv2.resize(patch, image_size)
        patch = normalization(patch).unsqueeze(0).to(device)
        patch = model(patch).cpu().detach().numpy()
        patches.append(patch)
    # np.concatenate expects a sequence of arrays, so collect the per-box
    # outputs in a list and join them once at the end (the original call
    # np.concatenate(patches, patch) passed the new patch as the axis argument).
    return np.concatenate(patches) if patches else np.array([])
5,341,757
def roc( observations, forecasts, bin_edges="continuous", dim=None, drop_intermediate=False, return_results="area", ): """Computes the relative operating characteristic for a range of thresholds. Parameters ---------- observations : xarray.Dataset or xarray.DataArray Labeled array(s) over which to apply the function. If ``bin_edges=='continuous'``, observations are binary. forecasts : xarray.Dataset or xarray.DataArray Labeled array(s) over which to apply the function. If ``bin_edges=='continuous'``, forecasts are probabilities. bin_edges : array_like, str, default='continuous' Bin edges for categorising observations and forecasts. Similar to np.histogram, \ all but the last (righthand-most) bin include the left edge and exclude the \ right edge. The last bin includes both edges. ``bin_edges`` will be sorted in \ ascending order. If ``bin_edges=='continuous'``, calculate ``bin_edges`` from \ forecasts, equal to ``sklearn.metrics.roc_curve(f_boolean, o_prob)``. dim : str, list The dimension(s) over which to compute the contingency table drop_intermediate : bool, default=False Whether to drop some suboptimal thresholds which would not appear on a plotted ROC curve. This is useful in order to create lighter ROC curves. Defaults to ``True`` in ``sklearn.metrics.roc_curve``. return_results: str, default='area' Specify how return is structed: - 'area': return only the ``area under curve`` of ROC - 'all_as_tuple': return ``true positive rate`` and ``false positive rate`` at each bin and area under the curve of ROC as tuple - 'all_as_metric_dim': return ``true positive rate`` and ``false positive rate`` at each bin and ``area under curve`` of ROC concatinated into new ``metric`` dimension Returns ------- xarray.Dataset or xarray.DataArray : reduced by dimensions ``dim``, see ``return_results`` parameter. ``true positive rate`` and ``false positive rate`` contain ``probability_bin`` dimension with ascending ``bin_edges`` as coordinates. Examples -------- >>> f = xr.DataArray(np.random.normal(size=(1000)), ... coords=[('time', np.arange(1000))]) >>> o = f.copy() >>> category_edges = np.linspace(-2, 2, 5) >>> xs.roc(o, f, category_edges, dim=['time']) <xarray.DataArray 'histogram_observations_forecasts' ()> array(1.) See also -------- xskillscore.Contingency sklearn.metrics.roc_curve References ---------- http://www.cawcr.gov.au/projects/verification/ """ if dim is None: dim = list(forecasts.dims) if isinstance(dim, str): dim = [dim] continuous = False if isinstance(bin_edges, str): if bin_edges == "continuous": continuous = True # check that o binary if isinstance(observations, xr.Dataset): o_check = observations.to_array() else: o_check = observations if str(o_check.dtype) != "bool": if not ((o_check == 0) | (o_check == 1)).all(): raise ValueError( 'Input "observations" must represent logical (True/False) outcomes', o_check, ) # works only for 1var if isinstance(forecasts, xr.Dataset): varlist = list(forecasts.data_vars) if len(varlist) == 1: v = varlist[0] else: raise ValueError( "Only works for `xr.Dataset` with one variable, found" f"{forecasts.data_vars}. Considering looping over `data_vars`" "or `.to_array()`." 
) f_bin = forecasts[v] else: f_bin = forecasts f_bin = f_bin.stack(ndim=forecasts.dims) f_bin = f_bin.sortby(-f_bin) bin_edges = np.append(f_bin[0] + 1, f_bin) bin_edges = np.unique(bin_edges) # ensure that in ascending order else: raise ValueError("If bin_edges is str, it can only be continuous.") else: bin_edges = np.sort(bin_edges) # ensure that in ascending order # loop over each bin_edge and get true positive rate and false positive rate # from contingency tpr, fpr = [], [] for i in bin_edges: dichotomous_category_edges = np.array( [-np.inf, i, np.inf] ) # "dichotomous" means two-category dichotomous_contingency = Contingency( observations, forecasts, dichotomous_category_edges, dichotomous_category_edges, dim=dim, ) fpr.append(dichotomous_contingency.false_alarm_rate()) tpr.append(dichotomous_contingency.hit_rate()) tpr = xr.concat(tpr, "probability_bin") fpr = xr.concat(fpr, "probability_bin") tpr["probability_bin"] = bin_edges fpr["probability_bin"] = bin_edges fpr = fpr.fillna(1.0) tpr = tpr.fillna(0.0) # pad (0,0) and (1,1) fpr_pad = xr.concat( [ xr.ones_like(fpr.isel(probability_bin=0, drop=False)), fpr, xr.zeros_like(fpr.isel(probability_bin=-1, drop=False)), ], "probability_bin", ) tpr_pad = xr.concat( [ xr.ones_like(tpr.isel(probability_bin=0, drop=False)), tpr, xr.zeros_like(tpr.isel(probability_bin=-1, drop=False)), ], "probability_bin", ) if drop_intermediate and fpr.probability_bin.size > 2: fpr, tpr = _drop_intermediate(fpr, tpr) fpr_pad, tpr_pad = _drop_intermediate(fpr_pad, tpr_pad) area = _auc(fpr_pad, tpr_pad) if continuous: # sklearn returns in reversed order fpr = fpr.sortby(-fpr.probability_bin) tpr = tpr.sortby(-fpr.probability_bin) # mask always nan def _keep_masked(new, ori, dim): """Keep mask from `ori` deprived of dimensions from `dim` in input `new`.""" isel_dim = {d: 0 for d in forecasts.dims if d in dim} mask = ori.isel(isel_dim, drop=True) new_masked = new.where(mask.notnull()) return new_masked fpr = _keep_masked(fpr, forecasts, dim=dim) tpr = _keep_masked(tpr, forecasts, dim=dim) area = _keep_masked(area, forecasts, dim=dim) if return_results == "area": return area elif return_results == "all_as_metric_dim": results = xr.concat([fpr, tpr, area], "metric", coords="minimal") results["metric"] = [ "false positive rate", "true positive rate", "area under curve", ] return results elif return_results == "all_as_tuple": return fpr, tpr, area else: raise NotImplementedError( "expect `return_results` from [all_as_tuple, area, all_as_metric_dim], " f"found {return_results}" )
5,341,758
def print_forecast(forecast=None):
    """Print forecast to screen."""
    if forecast is None:
        return
    print('-' * 20)
    print(time.strftime('%Y/%m/%d %H:%M:%S'))
    print("LAT: {0} LON: {1}".format(LAT, LON))
    print('-' * 20)
    for daily in forecast:
        print(daily)
5,341,759
def make_grammar(): """Creates the grammar to be used by a spec matcher.""" # This is apparently how pyparsing recommends to be used, # as http://pyparsing.wikispaces.com/share/view/644825 states that # it is not thread-safe to use a parser across threads. unary_ops = ( # Order matters here (so that '=' doesn't match before '==') Literal("==") | Literal("=") | Literal("!=") | Literal("<in>") | Literal(">=") | Literal("<=") | Literal(">") | Literal("<") | Literal("s==") | Literal("s!=") | # Order matters here (so that '<' doesn't match before '<=') Literal("s<=") | Literal("s<") | # Order matters here (so that '>' doesn't match before '>=') Literal("s>=") | Literal("s>")) all_in_nary_op = Literal("<all-in>") or_ = Literal("<or>") # An atom is anything not an keyword followed by anything but whitespace atom = ~(unary_ops | all_in_nary_op | or_) + Regex(r"\S+") unary = unary_ops + atom nary = all_in_nary_op + OneOrMore(atom) disjunction = OneOrMore(or_ + atom) # Even-numbered tokens will be '<or>', so we drop them disjunction.setParseAction(lambda _s, _l, t: ["<or>"] + t[1::2]) expr = disjunction | nary | unary | atom return expr
5,341,760
def _pull(keys):
    """helper method for implementing `client.pull` via `client.apply`"""
    if isinstance(keys, (list, tuple, set)):
        return [eval(key, globals()) for key in keys]
    else:
        return eval(keys, globals())
5,341,761
def position(df):
    """
    Compute the daily position from the trading signal.
    :param df:
    :return:
    """
    # Derive the position actually held each day from the signal.
    df['pos'] = df['signal'].shift(1)
    df['pos'].fillna(method='ffill', inplace=True)

    # Take into account that shares cannot be traded on limit-up / limit-down days.
    # Find the days whose open is limit-up.
    # ('开盘价' is the open-price column, '收盘价' the close-price column.)
    cond_cannot_buy = df['开盘价'] > df['收盘价'].shift(1) * 1.097  # today's open is 9.7% above yesterday's close
    # On limit-up opens where the target position is 1, blank out 'pos'.
    # ?? Question: why 1?
    df.loc[cond_cannot_buy & (df['pos'] == 1), 'pos'] = None

    # Find the days whose open is limit-down.
    cond_cannot_buy = df['开盘价'] < df['收盘价'].shift(1) * 0.903  # today's open is 9.7% below yesterday's close
    # On limit-down opens where the target position is 0, blank out 'pos'.
    # ?? Question: why 0?
    df.loc[cond_cannot_buy & (df['pos'] == 0), 'pos'] = None

    # On days where the position is NaN no trading is possible, so the position
    # must stay the same as on the previous trading day.
    df['pos'].fillna(method='ffill', inplace=True)
    # Any position still NaN after the forward fill is set to 0.
    df['pos'].fillna(value=0, inplace=True)

    return df
5,341,762
def prepare_data(data, preprocessed_data, args):
    """Prepare Data"""
    data = data.to_numpy()
    train_size = int(len(data) * args.train_split)
    test_size = len(data) - train_size

    train_X = preprocessed_data[0:train_size]
    train_Y = data[0:train_size]
    test_X = preprocessed_data[train_size:len(preprocessed_data)]
    test_Y = data[train_size:len(preprocessed_data)]

    return train_X, train_Y, test_X, test_Y
5,341,763
def svn_client_conflict_tree_get_victim_node_kind(conflict):
    """svn_client_conflict_tree_get_victim_node_kind(svn_client_conflict_t * conflict) -> svn_node_kind_t"""
    return _client.svn_client_conflict_tree_get_victim_node_kind(conflict)
5,341,764
def list(event, production):
    """
    List the resources stored for an event and production.
    """
    click.echo(f"{'Resource':30} {'Hash':32} {'UUID':32}")
    click.echo("-" * 96)
    for resource, details in this_store.manifest.list_resources(event, production).items():
        click.echo(f"{resource:30} {details['hash']:32} {details['uuid']:32}")
5,341,765
def load_module(script_path: str, module_name: str):
    """
    return a module

    spec.loader.exec_module(foo)
    foo.A()
    """
    spec = importlib.util.spec_from_file_location(module_name, script_path)
    module = importlib.util.module_from_spec(spec)
    return spec, module
5,341,766
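A usage sketch for `load_module` above, following the hint in its docstring; the script path and module name are hypothetical.

spec, module = load_module("/tmp/my_script.py", "my_script")  # hypothetical path
spec.loader.exec_module(module)  # runs the module body, populating `module`
# Attributes defined in the script are now available, e.g. module.main()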
def readfile(*paths):
    """Build a file path from *paths* and return the contents."""
    with open(os.path.join(*paths), 'r') as f:
        return f.read()
5,341,767
def memory_func(func):
    """
    Decorator that measures the resident memory used by a function.
    """
    def wrapper(*args, **kwargs):
        proc = Process(getpid())  # wrap the current process id in a Process object
        start_memory = proc.memory_info().rss  # resident memory before the call
        result = func(*args, **kwargs)  # run the wrapped function with its arguments
        end_memory = proc.memory_info().rss  # resident memory after the call
        print(f"Physical memory used by function {func.__name__}: {end_memory-start_memory} bytes")  # report the result
        return result
    return wrapper
5,341,768
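# Hedged usage sketch (not from the original source); relies on the same
# psutil Process / os.getpid imports the decorator above already needs.
@memory_func
def build_list(n):
    return list(range(n))

build_list(1_000_000)   # prints the RSS delta measured around the call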
def read_data(datafile='sampling_data_2015.txt'): """Imports data from an ordered txt file and creates a list of samples.""" sample_list = [] with open(datafile, 'r') as file: for line in file: method, date, block, site, orders = line.split('|') new_sample = sample(method, date, block, site) new_sample.import_orders(orders) sample_list.append(new_sample) return sample_list
5,341,769
async def get_https(method: str = "all"): """Get https proxies from get_proxies_func() function.""" return await get_proxies_func("https", method)
5,341,770
def iam_analysis(obs_spec, model1_pars, model2_pars, rvs=None, gammas=None, verbose=False, norm=False, save_only=True, chip=None, prefix=None, errors=None, area_scale=False, wav_scale=True, norm_method="scalar", fudge=None): """Run two component model over all model combinations.""" rvs = check_inputs(rvs) gammas = check_inputs(gammas) if isinstance(model1_pars, list): logging.debug(__("Number of close model_pars returned {0}", len(model1_pars))) if isinstance(model2_pars, list): logging.debug(__("Number of close model_pars returned {0}", len(model2_pars))) # Solution Grids to return iam_grid_chisqr_vals = np.empty((len(model1_pars), len(model2_pars))) args = [model2_pars, rvs, gammas, obs_spec] kwargs = {"norm": norm, "save_only": save_only, "chip": chip, "prefix": prefix, "verbose": verbose, "errors": errors, "area_scale": area_scale, "wav_scale": wav_scale, "norm_method": norm_method, "fudge": fudge, } for ii, params1 in enumerate(tqdm(model1_pars)): iam_grid_chisqr_vals[ii] = iam_wrapper(ii, params1, *args, **kwargs) if save_only: return None else: return iam_grid_chisqr_vals
5,341,771
def _parse_archive_name(pathname): """Return the name of the project given the pathname of a project archive file. """ return os.path.basename(pathname).split('.')[0]
5,341,772
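# Quick illustration (made-up path) of the "<project>.<ext>" naming
# convention the helper above assumes.
print(_parse_archive_name("/tmp/builds/myproject.tar.gz"))   # -> 'myproject'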
def download_har_dataset(): """ Download human activity recognition dataset from UCI ML Repository and store it at /tsfresh/notebooks/data. Examples ======== >>> from tsfresh.examples import har_dataset >>> har_dataset.download_har_dataset() """ zipurl = 'https://github.com/MaxBenChrist/human-activity-dataset/blob/master/UCI%20HAR%20Dataset.zip?raw=true' if os.path.exists(data_file_name_dataset) and os.path.exists(data_file_name_classes): _logger.warning("You have already downloaded the Human Activity Data Set.") return with urlopen(zipurl) as zipresp: with ZipFile(BytesIO(zipresp.read())) as zfile: zfile.extractall(path=data_file_name) zfile.close()
5,341,773
def is_authenticated(user, password): """Check if ``user``/``password`` couple is valid.""" global IMAP_WARNED_UNENCRYPTED if not user or not password: return False log.LOGGER.debug( "Connecting to IMAP server %s:%s." % (IMAP_SERVER, IMAP_SERVER_PORT,)) connection_is_secure = False if IMAP_USE_SSL: connection = imaplib.IMAP4_SSL(host=IMAP_SERVER, port=IMAP_SERVER_PORT) connection_is_secure = True else: connection = imaplib.IMAP4(host=IMAP_SERVER, port=IMAP_SERVER_PORT) server_is_local = (IMAP_SERVER == "localhost") if not connection_is_secure: try: connection.starttls() log.LOGGER.debug("IMAP server connection changed to TLS.") connection_is_secure = True except AttributeError: if not server_is_local: log.LOGGER.error( "Python 3.2 or newer is required for IMAP + TLS.") except (imaplib.IMAP4.error, imaplib.IMAP4.abort) as exception: log.LOGGER.warning( "IMAP server at %s failed to accept TLS connection " "because of: %s" % (IMAP_SERVER, exception)) if server_is_local and not connection_is_secure and not IMAP_WARNED_UNENCRYPTED: IMAP_WARNED_UNENCRYPTED = True log.LOGGER.warning( "IMAP server is local. " "Will allow transmitting unencrypted credentials.") if connection_is_secure or server_is_local: try: connection.login(user, password) connection.logout() log.LOGGER.debug( "Authenticated IMAP user %s " "via %s." % (user, IMAP_SERVER)) return True except (imaplib.IMAP4.error, imaplib.IMAP4.abort) as exception: log.LOGGER.error( "IMAP server could not authenticate user %s " "because of: %s" % (user, exception)) else: log.LOGGER.critical( "IMAP server did not support TLS and is not ``localhost``. " "Refusing to transmit passwords under these conditions. " "Authentication attempt aborted.") return False
5,341,774
def apply_mask(input, mask): """Filter out an area of an image using a binary mask. Args: input: A three channel numpy.ndarray. mask: A black and white numpy.ndarray. Returns: A three channel numpy.ndarray. """ return cv2.bitwise_and(input, input, mask=mask)
5,341,775
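# Hedged sketch (not from the original source): build a circular mask and
# apply it; uses the numpy and cv2 packages apply_mask() already depends on.
import numpy as np
import cv2

img = np.full((100, 100, 3), 255, dtype=np.uint8)   # plain white image
mask = np.zeros((100, 100), dtype=np.uint8)
cv2.circle(mask, (50, 50), 30, 255, -1)             # white disc = region to keep
cropped = apply_mask(img, mask)                     # pixels outside the disc become black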
def runUrllib2(urls, num): """Running benchmark for urllib2. Args: urls: List of URLs. num: Number of requests. """ results = [] for i in range(num): sys.stderr.write('.') start = time.time() for url in urls: urlopen(url) end = time.time() results.append(round(end-start, 3)) return results
5,341,776
def fetch_track_lyrics(artist, title):
    """ Returns lyrics when found, None when not found """
    MUSIXMATCH_KEY = get_musixmatch_key()
    api_query = 'https://api.musixmatch.com/ws/1.1/matcher.lyrics.get?'
    api_query += 'q_track=%s&' % title
    api_query += 'q_artist=%s&' % artist
    api_query += 'apikey=%s' % MUSIXMATCH_KEY
    response = requests.get(api_query)
    if response.status_code != 200:
        raise Exception("Musixmatch API not accessible")
    res_body = json.loads(response.text)
    message = res_body['message']
    if message['header']['status_code'] != 200:
        return None
    body = message['body']
    if 'lyrics' not in body:
        return None
    lyrics = body['lyrics']
    return {
        'lyrics': lyrics['lyrics_body'],
        'lang': lyrics['lyrics_language']
    }
5,341,777
def string_avg(strings, binary=True): """ Takes a list of strings of equal length and returns a string containing the most common value from each index in the string. Optional argument: binary - a boolean indicating whether or not to treat strings as binary numbers (fill in leading zeros if lengths differ). """ if binary: # Assume this is a binary number and fill leading zeros strings = deepcopy(strings) longest = len(max(strings, key=len)) for i in range(len(strings)): while len(strings[i]) < longest: split_string = strings[i].split("b") strings[i] = "0b0" + split_string[1] avg = "" for i in (range(len(strings[0]))): opts = [] for s in strings: opts.append(s[i]) avg += max(set(opts), key=opts.count) return avg
5,341,778
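# Small check (not from the original source): majority vote per character
# position over a few Python bin() strings.
print(string_avg([bin(5), bin(7), bin(13)]))   # expect '0b0101'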
def refresh():
    """Pull fresh data from Open AQ and replace existing data."""
    DB.drop_all()
    DB.create_all()
    api = openaq.OpenAQ()
    status, body = api.measurements(city='Los Angeles', parameter='pm25')
    for reading in body['results']:
        record = Record(datetime=reading['date']['utc'],
                        value=reading['value'])
        DB.session.add(record)
        DB.session.commit()
    return 'Data refreshed!'
5,341,779
def personalize_patient_cellline(expression_data_flag="-e", expression_data=None,
                                 cnv_data_flag="-c", cnv_data=None,
                                 mutation_data_flag="-m", mutation_data=None,
                                 model_bnd_flag="-x", model_bnd=None,
                                 model_cfg_flag="-y", model_cfg=None,
                                 t_flag="-t", t="Epithelial_cells",
                                 model_output_flag="-o", model_output_dir=None,
                                 # personalized_result_flag="-p", personalized_result=None
                                 ):
    """
    Performs the patient personalization step.
    The definition is equivalent to:
        ./personalize_patient.sh \
            -e <expression> \
            -c <cells> \
            -m <model_prefix> -t <t> \
            -o <model_output_dir> \
            # -p <personalization_result> \

    Sample:
        ./personalize_patient.sh \
            -e $outdir/$sample/norm_data.tsv \
            -c $outdir/$sample/cells_metadata.tsv \
            -m $model_prefix -t Epithelial_cells \
            -o $outdir/$sample/models \
            # -p $outdir/$sample/personalized_by_cell_type.tsv \
    """
    # Empty function since it represents a binary execution:
    pass
5,341,780
def update_organization(current_user):
    """
    Update the organization's information.
    """
    try:
        if CmsUsers.can(current_user.id, "put", "contacts"):
            organization = CmsOrganization.query.first()
            update_data = request.get_json()
            for key in list(update_data.keys()):
                if key not in ['company_name', 'full_company_name',
                               'requisites']:
                    del update_data[key]
            if not organization_update_validator.is_valid(update_data):
                errors = []
                for error in sorted(
                        organization_update_validator.iter_errors(
                            update_data), key=str):
                    errors.append(error.message)
                separator = '; '
                error_text = separator.join(errors)
                response = Response(
                    response=json.dumps({'type': 'danger',
                                         'text': error_text}),
                    status=422,
                    mimetype='application/json'
                )
            else:
                organization_name_old = organization.company_name
                organization.company_name = update_data['company_name']
                organization.full_company_name = update_data[
                    'full_company_name']
                if 'requisites' in update_data:
                    organization.requisites = update_data['requisites']
                db.session.add(organization)
                db.session.commit()
                response = Response(
                    response=json.dumps(
                        {'type': 'success',
                         'text': 'Отредактирована основная '
                                 'информация организации ' +
                                 str(organization_name_old) + '!',
                         'link': url_for('.get_organization',
                                         _external=True)}),
                    status=200,
                    mimetype='application/json'
                )
        else:
            response = Response(
                response=json.dumps({'type': 'danger',
                                     'text': 'Доступ запрещен (403)'}),
                status=403,
                mimetype='application/json'
            )
    except Exception:
        response = server_error(request.args.get("dbg"))
    return response
5,341,781
def config_section_data(): """Produce the default configuration section for app.config, when called by `resilient-circuits config [-c|-u]` """ config_data = u"""[fn_sep] sep_base_path=/sepm/api/v1 sep_auth_path=/sepm/api/v1/identity/authenticate sep_host=<SEPM server dns name or ip address> sep_port=8446 sep_username=<username> sep_password=<password> sep_domain=<SEP domain name> # Optional settings for access to SEPM via a proxy. #http_proxy=http://proxy:80 #https_proxy=http://proxy:80 # Limit result sent to Resilient, add full result as an attachment. sep_results_limit=200 # Period of time (seconds) to wait for all endpoints to return a scan result. sep_scan_timeout=1800 """ return config_data
5,341,782
def create_users_table() -> None: """ SQL query that creates user table if it doesnt exist. Table order must match order defined on User class. """ sql = """CREATE TABLE IF NOT EXISTS users ( slack_id text UNIQUE NOT NULL PRIMARY KEY, slack_channel text UNIQUE, email text UNIQUE, full_name text, pref_name text, phone text, photo_url text, challenge text, challenge_datetime timestamp, can_play_game boolean );""" conn = sqlite3.connect(database) cur = conn.cursor() cur.execute(sql) conn.commit() conn.close()
5,341,783
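# Hedged usage sketch (not from the original source); `database` is whatever
# path the module-level variable used above points at.
create_users_table()
conn = sqlite3.connect(database)
conn.execute("INSERT OR IGNORE INTO users (slack_id, full_name) VALUES (?, ?)",
             ("U123ABC", "Test User"))
conn.commit()
conn.close()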
def read_eieio_command_message(data, offset): """ Reads the content of an EIEIO command message and returns an object\ identifying the command which was contained in the packet, including\ any parameter, if required by the command :param data: data received from the network :type data: bytestring :param offset: offset at which the parsing operation should start :type offset: int :return: an object which inherits from EIEIOCommandMessage which contains\ parsed data received from the network :rtype: \ :py:class:`spinnman.messages.eieio.command_messages.eieio_command_message.EIEIOCommandMessage` """ command_header = EIEIOCommandHeader.from_bytestring(data, offset) command_number = command_header.command if (command_number == constants.EIEIO_COMMAND_IDS.DATABASE_CONFIRMATION.value): return DatabaseConfirmation.from_bytestring( command_header, data, offset + 2) # Fill in buffer area with padding elif (command_number == constants.EIEIO_COMMAND_IDS.EVENT_PADDING.value): return PaddingRequest() # End of all buffers, stop execution elif (command_number == constants.EIEIO_COMMAND_IDS.EVENT_STOP.value): return EventStopRequest() # Stop complaining that there is sdram free space for buffers elif (command_number == constants.EIEIO_COMMAND_IDS.STOP_SENDING_REQUESTS.value): return StopRequests() # Start complaining that there is sdram free space for buffers elif (command_number == constants.EIEIO_COMMAND_IDS.START_SENDING_REQUESTS.value): return StartRequests() # Spinnaker requesting new buffers for spike source population elif (command_number == constants.EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_BUFFERS.value): return SpinnakerRequestBuffers.from_bytestring( command_header, data, offset + 2) # Buffers being sent from host to SpiNNaker elif (command_number == constants.EIEIO_COMMAND_IDS.HOST_SEND_SEQUENCED_DATA.value): return HostSendSequencedData.from_bytestring( command_header, data, offset + 2) # Buffers available to be read from a buffered out vertex elif (command_number == constants.EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_READ_DATA.value): return SpinnakerRequestReadData.from_bytestring( command_header, data, offset + 2) # Host confirming data being read form SpiNNaker memory elif (command_number == constants.EIEIO_COMMAND_IDS.HOST_DATA_READ.value): return HostDataRead.from_bytestring( command_header, data, offset + 2) return EIEIOCommandMessage(command_header, data, offset + 2)
5,341,784
def test_outcomes_unwrap_raises_trio_error_over_qt_value(): """Unwrapping an Outcomes prioritizes a Trio error over a Qt value.""" class LocalUniqueException(Exception): pass this_outcome = qtrio.Outcomes( qt=outcome.Value(9), trio=outcome.Error(LocalUniqueException()), ) with pytest.raises(LocalUniqueException): this_outcome.unwrap()
5,341,785
def quote(): """Get stock quote.""" if request.method == "POST": # Get values get_symbol = request.form.get("symbol") stock = lookup(get_symbol) # Ensure symbol was submitted if not get_symbol: return apology("must provide symbol") # Ensure symbol exists elif not stock: return apology("stock not found") # Display stocks else: return render_template("quoted.html", stock=stock) else: return render_template("quote.html", stock=None)
5,341,786
def counts_to_df(value_counts, colnames, n_points): """DO NOT USE IT! """ pdf = pd.DataFrame(value_counts .to_frame('count') .reset_index() .apply(lambda row: dict({'count': row['count']}, **dict(zip(colnames, row['index'].toArray()))), axis=1) .values .tolist()) pdf['count'] /= pdf['count'].sum() proportions = pdf['count'] / pdf['count'].min() factor = int(n_points / proportions.sum()) pdf = pd.concat([pdf[colnames], (proportions * factor).astype(int)], axis=1) combinations = pdf.apply(lambda row: row.to_dict(), axis=1).values.tolist() return pd.DataFrame([dict(v) for c in combinations for v in int(c.pop('count')) * [list(c.items())]])
5,341,787
def make_pair_plot(samples, param_names=None, pair_plot_params=PairPlotParams()):
    """
    Make a pair plot for the parameters from the posterior distribution.

    Parameters
    ----------
    samples : Pandas DataFrame
        Each column contains samples from the posterior distribution.

    param_names : list of str
        Names of the parameters for plotting. If None, all will be plotted.

    Returns
    -------
    Seaborn's PairGrid
    """
    param_names = filter_param_names(samples.columns, param_names)

    if len(param_names) > pair_plot_params.max_params:
        print((
            f'Showing only first {pair_plot_params.max_params} '
            f'parameters out of {len(param_names)} in pair plot. '
            'Consider limiting the parameters with "param_names".'))

        param_names = param_names[:pair_plot_params.max_params]

    samples = samples[param_names]

    # Show no more than `max_samples` markers
    keep_nth = math.ceil(samples.shape[0] / pair_plot_params.max_samples)
    samples = samples[::keep_nth]

    g = sns.PairGrid(samples)
    g = g.map_upper(sns.scatterplot, s=pair_plot_params.marker_size,
                    color=pair_plot_params.color,
                    edgecolor=pair_plot_params.edgecolor,
                    alpha=pair_plot_params.alpha)
    g = g.map_lower(sns.kdeplot, color=pair_plot_params.color)
    g = g.map_diag(plt.hist, color=pair_plot_params.color,
                   edgecolor=pair_plot_params.diag_edge_color)

    return g
5,341,788
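# Hedged usage sketch (not from the original source): fake posterior draws
# with made-up parameter names; assumes the module-level filter_param_names,
# PairPlotParams, seaborn and matplotlib objects used above are available.
import numpy as np
import pandas as pd

fake_samples = pd.DataFrame(np.random.randn(500, 3),
                            columns=["alpha", "beta", "sigma"])
grid = make_pair_plot(fake_samples, param_names=["alpha", "beta"])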
def read_meta_soe(metafile): """read soe metadata.csv to get filename to meta mapping""" wavfiles = csv2dict(metafile) return {f['fid']:{k:v for (k,v) in f.items() if k!='fid'} for f in wavfiles}
5,341,789
def send_message(message, string, dm=False, user=None, format_content=True):
    """send_message

    Sends a message with the string supplied by [lang]_STRING.txt files.

    :param message: MessageWrapper object with data for formatting.
    :param string: Name of the string to read.
    :param dm: Whether the message should be sent to dm. Requires user to not be None.
    :param user: User for dm usage.
    :param format_content: Passed through unchanged to send_custom_message.
    """
    msg = get_string(string, users.get_language(message))
    if not msg or msg == MessageCode.UNKNOWN_STRING:
        return MessageCode.NO_STRING

    return send_custom_message(message, msg, dm=dm, user=user, format_content=format_content)
5,341,790
def calc_torque(beam, fforb, index=False):
    """
    Calculates torque from a neutral beam (or beam component)

    torque = F * r_tan
           = (P/v) * r_tan
           = (P/sqrt(2E/m)) * r_tan
           = P * sqrt(m/(2E)) * r_tan

    :param beam: beam object with attributes m, P.W, E.J, rtang
    :param fforb: fast ion loss fraction (the torque is scaled by 1 - fforb)
    :param index: index into the beam component arrays, or False to use the full arrays
    :return: torque
    """
    if index is not False:
        power = beam.P.W[index]
        energy = beam.E.J[index]
        mass = beam.m
        rtan = beam.rtang[index]
        # Piper Changes: Included fast ion losses.
        torque = power * np.sqrt(0.5 * mass / energy) * rtan * (1.0 - fforb)
        return torque
    else:
        power = beam.P.W
        energy = beam.E.J
        mass = beam.m
        rtan = beam.rtang
        # Piper Changes: Included fast ion losses.
        torque = power * np.sqrt(0.5 * mass / energy) * rtan * (1.0 - fforb)
        return torque
5,341,791
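# Back-of-envelope check (not from the original source): SimpleNamespace
# stands in for the beam object and the numbers are purely illustrative.
from types import SimpleNamespace

beam = SimpleNamespace(
    P=SimpleNamespace(W=1.0e6),              # 1 MW of injected power
    E=SimpleNamespace(J=75e3 * 1.602e-19),   # 75 keV per particle, in joules
    m=3.344e-27,                             # deuteron mass [kg]
    rtang=1.0,                               # tangency radius [m]
)
print(calc_torque(beam, fforb=0.1))          # roughly 0.3 N*m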
def test_sa_empty_commit(session): """Direct commit generates nothing """ session.commit() assert [t_writes, t_updates, t_deletes] == [[]] * 3
5,341,792
async def handle_token_job_transition(websocket): """Send several job status, and close with 4002.""" msg_out = WebsocketResponseMethod(type_="job-status", data={"status": "RUNNING"}) await websocket.send(msg_out.as_json().encode("utf8")) await asyncio.sleep(1) msg_out = WebsocketResponseMethod(type_="job-status", data={"status": "COMPLETED"}) await websocket.send(msg_out.as_json().encode("utf8")) await websocket.close(code=4002)
5,341,793
def cli(ctx, user_id): """Create a new API key for a given user. Output: the API key for the user """ return ctx.gi.users.create_user_apikey(user_id)
5,341,794
def whitespace_tokenizer(text): """Tokenize on whitespace, keeping whitespace. Args: text: The text to tokenize. Returns: list: A list of pseudo-word tokens. """ return re.findall(r"\S+\s*", text)
5,341,795
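# Quick check (not from the original source); note that trailing whitespace
# stays attached to each token.
print(whitespace_tokenizer("hello  world\nfoo"))   # ['hello  ', 'world\n', 'foo']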
def _u2i(number): """ Converts a 32 bit unsigned number to signed. If the number is negative it indicates an error. On error a pigpio exception will be raised if exceptions is True. """ v = u2i(number) if v < 0: if exceptions: raise error(error_text(v)) return v
5,341,796
def apply_once(func, arr, axes, keepdims=True):
    """
    Similar to `numpy.apply_over_axes`, except this performs the operation
    over a flattened version of all the axes, meaning that the function will
    only be called once. This only makes a difference for non-linear
    functions.

    Parameters
    ----------
    func : callable
        Function that operates well on Numpy arrays and returns a single value
        of compatible dtype.
    arr : ndarray
        Array to do operation over.
    axes : int or iterable
        Specifies the axes to perform the operation over. Only one call will
        be made to `func`, with all values flattened.
    keepdims : bool
        By default, this is True, so the collapsed dimensions remain with
        length 1. This is similar to `numpy.apply_over_axes` in that regard.
        If this is set to False, the dimensions are removed, just like when
        using for instance `numpy.sum` over a single axis. Note that this is
        safer than subsequently calling squeeze, since this option will
        preserve length-1 dimensions that were not operated on.

    Examples
    --------
    >>> import deepdish as dd
    >>> import numpy as np
    >>> rs = np.random.RandomState(0)
    >>> x = rs.uniform(size=(10, 3, 3))

    Imagine that you have ten 3x3 images and you want to calculate each
    image's intensity standard deviation:

    >>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
    array([ 0.06056838,  0.08230712,  0.08135083,  0.09938963,  0.08533604,
            0.07830725,  0.066148  ,  0.07983019,  0.08134123,  0.01839635])

    This is the same as ``x.std(1).std(1)``, which is not the standard
    deviation of all 9 pixels together. To fix this we can flatten the pixels
    and try again:

    >>> x.reshape(10, 9).std(axis=1)
    array([ 0.17648981,  0.32849108,  0.29409526,  0.25547501,  0.23649064,
            0.26928468,  0.20081239,  0.33052397,  0.29950855,  0.26535717])

    This is exactly what this function does for you:

    >>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
    array([ 0.17648981,  0.32849108,  0.29409526,  0.25547501,  0.23649064,
            0.26928468,  0.20081239,  0.33052397,  0.29950855,  0.26535717])
    """
    all_axes = np.arange(arr.ndim)
    if isinstance(axes, int):
        axes = {axes}
    else:
        axes = set(axis % arr.ndim for axis in axes)

    principal_axis = min(axes)
    for i, axis in enumerate(axes):
        axis0 = principal_axis + i
        if axis != axis0:
            all_axes[axis0], all_axes[axis] = all_axes[axis], all_axes[axis0]

    transposed_arr = arr.transpose(all_axes)

    new_shape = []
    new_shape_keepdims = []
    for axis, dim in enumerate(arr.shape):
        if axis == principal_axis:
            new_shape.append(-1)
        elif axis not in axes:
            new_shape.append(dim)

        if axis in axes:
            new_shape_keepdims.append(1)
        else:
            new_shape_keepdims.append(dim)

    collapsed = np.apply_along_axis(func, principal_axis,
                                    transposed_arr.reshape(new_shape))

    if keepdims:
        return collapsed.reshape(new_shape_keepdims)
    else:
        return collapsed
5,341,797
def test_crop_all_returns_list(in_paths, output_dir, basic_geometry_gdf): """Test that crop all returns a list. """ img_list = es.crop_all( in_paths, output_dir, basic_geometry_gdf, overwrite=True ) assert type(img_list) == list
5,341,798
def _log_data(path, action_name, header, tag, log_metadata=False): """ Log data about a path or trajectory. @param path: trajectory after postprocessing @param action_name: name of Action that generated the trajectory @param header: one-letter header for logs @param tag: tag to filter trajectory tags with @param log_metadata: True if metadata should be logged """ logger = get_logger() path_tags = GetTrajectoryTags(path) log_data = [header, action_name, path_tags.get(tag, 'unknown')] if log_metadata: log_data += [ path_tags.get(Tags.PLANNER, 'unknown'), path_tags.get(Tags.METHOD, 'unknown') ] logger.info(' '.join([str(v) for v in log_data]))
5,341,799