Dataset columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def group(spanins, prob_threshold, scope, valid_relation_set, mode):
    """
    For each unary instance that is classified as being in a relation, get the
    other argument which is also classified as being in the same relation but
    with a different role.
    ner1/2: list of unary instances
    """
    assert scope in ['intra', 'cross']
    # Grouping requirement: two instances could be in the same relation if all
    # of the following requirements are satisfied:
    # 1. both are assigned a positive pred_relation_label
    # 2. one must be subj and one must be obj
    # 3. the relation must be the same
    # 4. ner1, ner2, relation exists in the valid_relation_set
    if scope == 'intra':
        print("getting sent2entities")
        sent2entities = get_sent2entities(spanins)
        rels = []
        num_sents = len(sent2entities)
        for i, sentid in enumerate(sent2entities):
            stdout.write(f"\rgrouping, {i}/{num_sents}")
            stdout.flush()
            rels.extend(group_for_sent(sent2entities[sentid], valid_relation_set, prob_threshold, mode))
        print()
        return rels
5,342,000
def simulate_until_target_substate_or_max_t( _simulate_until_attractor_or_target_substate_or_max_t, initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables): """ Perform simulation to figure whether it reaches target substate. Does not return states of simulations that don't reach target substate. Target substate is not considered as reached until all the perturbations are carried out. Initial state can be considered as reached target substate if no perturbations are present. :param _simulate_until_attractor_or_target_substate_or_max_t: [function] to perform simulation :param initial_state: initial state of the network :param perturbed_nodes_by_t: dict (by time steps) of dicts (by nodes) of node states :param predecessor_node_lists: list of predecessor node lists :param truth_tables: list of dicts (key: tuple of predecessor node states, value: resulting node state) :return: list of states where last state contains target substate, or None if target substate was not reached """ states, *_, target_substate_is_reached, _ = _simulate_until_attractor_or_target_substate_or_max_t( initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables) return states if target_substate_is_reached else None
5,342,001
def test_generate_holiday_events3(): """Tests generate_holiday_events pre_post_num_dict parameter""" # Tests pre_post_num_dict countries = ["UnitedStates", "India"] year_start = 2019 year_end = 2020 holidays_to_model_separately = [ "New Year's Day", "Diwali", "Columbus Day" ] pre_num = 2 post_num = 2 pre_post_num_dict = { "New Year's Day": (0, 2), "Columbus Day": (1, 3)} daily_event_df_dict = generate_holiday_events( countries=countries, holidays_to_model_separately=holidays_to_model_separately, year_start=year_start, year_end=year_end, pre_num=pre_num, post_num=post_num, pre_post_num_dict=pre_post_num_dict) # expected expected_holidays = [ "New Years Day_plus_2", "Diwali_minus_2", "Diwali_plus_2", "Columbus Day_minus_1", "Columbus Day_plus_3" ] assert all([holiday in daily_event_df_dict.keys() for holiday in expected_holidays]) unexpected_holidays = [ "New Years Day_minus_1", "New Years Day_plus_3", "Diwali_minus_3", "Diwali_plus_3", "Columbus Day_minus_2", "Columbus Day_plus_4" ] assert not any([holiday in daily_event_df_dict.keys() for holiday in unexpected_holidays]) with pytest.warns(UserWarning) as record: pre_post_num_dict = {"Bank Holiday": (1, 1)} generate_holiday_events( countries=countries, holidays_to_model_separately=holidays_to_model_separately, year_start=year_start, year_end=year_end, pre_num=pre_num, post_num=post_num, pre_post_num_dict=pre_post_num_dict) assert "Requested holiday 'Bank Holiday' is not valid. Valid holidays are" in record[0].message.args[0]
5,342,002
def query_snpedia_online(rsid):
    """
    Query SNPedia online for the given rsid.

    :param rsid: the rsid to look up
    :return: tuple of (columns, genotypes) parsed from the SNPedia page
    """
    rsid = rsid.capitalize()
    url = "https://bots.snpedia.com/index.php"
    rsid_url = f"{url}/{rsid}"
    page = requests.get(rsid_url)
    soup = BeautifulSoup(page.content, "html.parser")
    columns, genotypes = parse_snpedia_online(soup, rsid)
    return columns, genotypes
5,342,003
def test_warning_monitor_should_pass_disabled(make_data):
    """WarningCount should pass if the limit is negative."""
    data = make_data({SPIDERMON_MAX_WARNINGS: -1})
    runner = data.pop("runner")
    suite = new_suite()
    data["stats"]["log_count/WARNING"] = 99999
    runner.run(suite, **data)
    assert runner.result.monitor_results[0].error is None
5,342,004
async def get_session(client_id: str, client_secret: str) -> AuthToken: """ Use the Authorization Code Grant flow to get a token. This opens a browser tab. """ refresh_token_file = os.path.join(config.config_dir(), '.refresh.token') base_url = 'https://bitbucket.org/site/oauth2' # If we have a refresh token, use that existing_token = None if os.path.isfile(refresh_token_file): with open(refresh_token_file) as f: existing_token = json.load(f) now = arrow.utcnow() if existing_token and arrow.get(existing_token['expires_at']) - now > timedelta(minutes=5): log.info('Found existing token') return existing_token # Otherwise, send the user to the browser flow redirect_uri = 'https://localhost:8888' client = WebApplicationClient(client_id) auth_url = client.prepare_request_uri(f'{base_url}/authorize', redirect_uri=redirect_uri) print(f'Please go to the following link, then copy the redirected URL back here.\n\n\t{auth_url}\n') code = client.parse_request_uri_response(input('URL: '))['code'] token_reqest_params = parse_qs(client.prepare_request_body(code=code, redirect_uri=redirect_uri)) async with aiohttp.ClientSession() as session: resp = await session.post( f'{base_url}/access_token', headers={'Authorization': aiohttp.BasicAuth(client_id, client_secret).encode()}, data=token_reqest_params ) if resp.status != 200: log.error(await resp.text()) raise Exception('Could not authenticate with the Bitbucket API') token: AuthToken = await resp.json() token['expires_at'] = now.shift(seconds=token['expires_in']).format(arrow.FORMAT_RFC3339) with open(refresh_token_file, 'w') as f: json.dump(token, f) return token
5,342,005
def lower(value: str):  # Only one argument.
    """Converts a string into all lowercase"""
    return value.lower()
5,342,006
def main(): """This function implements the command-line interface.""" # Parse input configuration. year = argv[2] assert year in ("dry_run", "dev", "2016", "2017") config = argv[1].split('-', 8) technique_string = config[0] assert technique_string in ("hard_terms", "soft_terms", "hard_topics", "soft_topics") technique = technique_string # Set up the document similarity model. if technique == "hard_topics" or technique == "soft_topics": similarity_model = TopicCosineSimilarity() if technique == "soft_topics" or technique == "soft_terms": term_similarity_string = config[1] assert term_similarity_string in ("w2v.ql", "w2v.googlenews", "glove.enwiki_gigaword5", "glove.common_crawl", "glove.twitter", "fasttext.enwiki") term_similarity = term_similarity_string soft_matrices_string = config[2] assert soft_matrices_string in ("mrel", "mlev", "mrel_mlev") if soft_matrices_string == "mrel": soft_matrices = [("mrel", 1.0)] elif soft_matrices_string == "mlev": soft_matrices = [("mlev", 1.0)] else: soft_matrices = [("mrel", 0.5), ("mlev", 0.5)] if technique == "hard_terms": similarity_model = TermHardCosineSimilarity() kwargs = {} elif technique == "hard_topics": kwargs = {} elif technique == "soft_terms": weighting_string = config[3] assert weighting_string in ("early", "late", "none") if weighting_string == "none": weighting = None else: weighting = weighting_string normalization_string = config[4] assert normalization_string in ("soft", "hard", "none") if normalization_string == "none": normalization = None else: normalization = normalization_string rounding_string = config[5] assert rounding_string in ("none", "round", "floor", "ceil") if rounding_string == "none": rounding = None else: rounding = rounding_string similarity_model = TermSoftCosineSimilarity(weighting=weighting, rounding=rounding, \ normalization=normalization) w2v_min_count=int(config[6]) m_knn=int(config[7]) m_threshold=float(config[8]) kwargs = {"soft_matrices": soft_matrices, "w2v_min_count": w2v_min_count, "m_knn": m_knn, \ "m_threshold": m_threshold, "term_similarity": term_similarity } elif technique == "soft_topics": w2v_min_count=int(config[3]) m_knn=int(config[4]) m_threshold=float(config[5]) kwargs = {"soft_matrices": soft_matrices, "w2v_min_count": w2v_min_count, "m_knn": m_knn, \ "m_threshold": m_threshold, "term_similarity": term_similarity } if year == "dry_run": # Prepare the language model and exit prematurely. LanguageModel(similarity=similarity_model, technique=technique, **kwargs) return # Determine directory and file names. if year == "dev": test_dirname = TEST2016_DIRNAME test_predictions_dirname = TEST2016_PREDICTIONS_DIRNAME gold_base_fname = DEV_GOLD_BASE_FNAME test_dataset_fname = DEV_DATASET_FNAME # train_dataset_fnames = TRAIN2016_DATASET_FNAMES elif year == "2016": test_dirname = TEST2016_DIRNAME test_predictions_dirname = TEST2016_PREDICTIONS_DIRNAME gold_base_fname = TEST2016_GOLD_BASE_FNAME test_dataset_fname = TEST2016_DATASET_FNAME # train_dataset_fnames = TRAIN2016_DATASET_FNAMES + [DEV_DATASET_FNAME] elif year == "2017": test_dirname = TEST2017_DIRNAME test_predictions_dirname = TEST2017_PREDICTIONS_DIRNAME gold_base_fname = TEST2017_GOLD_BASE_FNAME test_dataset_fname = TEST2017_DATASET_FNAME # train_dataset_fnames = TRAIN2017_DATASET_FNAMES + [DEV_DATASET_FNAME] output_fname = "%s/subtask_B_%s-%s.txt" % (test_predictions_dirname, argv[1], argv[2]) base_output_fname = "%s/subtask_B_%s-%s.txt" % (TEST_PREDICTIONS_BASE_DIRNAME, argv[1], argv[2]) # Perform the evaluation. 
if not path.exists(output_fname): LOGGER.info("Producing %s ...", output_fname) file_handler = logging.FileHandler("%s.log" % output_fname, encoding='utf8') logging.getLogger().addHandler(file_handler) start_time = time() language_model = LanguageModel(similarity=similarity_model, technique=technique, **kwargs) evaluate(language_model, [test_dataset_fname], output_fname) LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time)) logging.getLogger().removeHandler(file_handler) print("%s %s %s" % (test_dirname, gold_base_fname, base_output_fname))
5,342,007
def validate_oidc():
    """Demonstrates how an access token is validated"""
    token = request.headers['Authorization'].split(' ')[1]
    message = check_oidc_token(token)
    pprint.pprint(message)
    return jsonify({
        'success': message['success']
    })
5,342,008
def merge(a, b, path=None):
    """From https://stackoverflow.com/a/7205107"""
    if path is None:
        path = []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                merge(a[key], b[key], path + [str(key)])
            elif a[key] == b[key]:
                pass  # same leaf value
            else:
                pass  # ignore conflicts, left dict wins.
        else:
            a[key] = b[key]
    return a
5,342,009
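A minimal usage sketch of the `merge` helper above, assuming two plain nested dicts (the example inputs are hypothetical):

# 'b' fills in keys missing from 'a'; conflicting leaves keep a's value.
defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 5433, "user": "app"}, "debug": True}
merged = merge(defaults, overrides)
# -> {"db": {"host": "localhost", "port": 5432, "user": "app"}, "debug": False}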
def norm_sq(f, alpha, n, L_mat_long, step):
    """
    This function is the log-likelihood functional with the squared L2 norm of
    \hat{f_\beta} as the regularization term.
    """
    L_mat = L_mat_long.reshape(n, len(f))
    f[f <= 0] = 1e-6
    val = np.log(np.dot(L_mat, f))
    return -sum(val) / n + alpha * step**2 * sum(f**2)
5,342,010
def get_applications(device_id: str = None, rpc_channel: InstrumentServer = None): """ 获取手机应用列表 :param device_id: :param rpc_channel: :return: """ if not rpc_channel: _rpc_channel = init(device_id) else: _rpc_channel = rpc_channel application_list = _rpc_channel.call( "com.apple.instruments.server.services.device.applictionListing", "installedApplicationsMatching:registerUpdateToken:", {}, "").parsed if not rpc_channel: _rpc_channel.stop() return application_list
5,342,011
def test_get_notification_id():
    """Fetch one notification and query again by its check id."""
    single_notification = notifications.get_notifications(
        TOKEN, customerid=CUSTOMERID, limit=1)
    check_id = '-'.join(single_notification[0]['_id'].split('-')[0:2])
    result = notifications.get_notifications(
        TOKEN, customerid=CUSTOMERID, check_id=check_id, limit=2)
    assert "error" not in result
5,342,012
def open_mfdataset(files, use_cftime=True, parallel=True, data_vars='minimal',
                   chunks={'time': 1}, coords='minimal', compat='override',
                   drop=None, **kwargs):
    """optimized function for opening large cf datasets.

    based on https://github.com/pydata/xarray/issues/1385#issuecomment-561920115
    """
    def drop_all_coords(ds):
        return ds.reset_coords(drop=True)

    ds = xr.open_mfdataset(files, parallel=parallel, decode_times=False,
                           combine='by_coords', preprocess=drop_all_coords,
                           decode_cf=False, chunks=chunks, data_vars=data_vars,
                           coords=coords, compat=compat, **kwargs)
    return xr.decode_cf(ds, use_cftime=use_cftime)
5,342,013
def wls_sparse(X, y, w=1., calc_cov=False, verbose=False, **kwargs): """ Parameters ---------- X y w calc_cov verbose kwargs Returns ------- """ # The var returned by ln.lsqr is normalized by the variance of the error. To # obtain the correct variance, it needs to be scaled by the variance of the error. if w is None: # gracefully default to unweighted w = 1. w_std = np.asarray(np.sqrt(w)) wy = np.asarray(w_std * y) w_std = np.broadcast_to( np.atleast_2d(np.squeeze(w_std)).T, (X.shape[0], 1)) if not sp.issparse(X): wX = w_std * X else: wX = X.multiply(w_std) # noinspection PyTypeChecker out_sol = ln.lsqr(wX, wy, show=verbose, calc_var=True, **kwargs) p_sol = out_sol[0] # The residual degree of freedom, defined as the number of observations # minus the rank of the regressor matrix. nobs = len(y) npar = X.shape[1] # ==rank degrees_of_freedom_err = nobs - npar # wresid = np.exp(wy) - np.exp(wX.dot(p_sol)) # this option is better. # difference is small wresid = wy - wX.dot(p_sol) # this option is done by statsmodel err_var = np.dot(wresid, wresid) / degrees_of_freedom_err if calc_cov: # assert np.any() arg = wX.T.dot(wX) if sp.issparse(arg): # arg is square of size double: 1 + nt + no; single: 2 : nt # arg_inv = np.linalg.inv(arg.toarray()) arg_inv = np.linalg.lstsq( arg.todense(), np.eye(npar), rcond=None)[0] else: # arg_inv = np.linalg.inv(arg) arg_inv = np.linalg.lstsq( arg, np.eye(npar), rcond=None)[0] # for tall systems pinv (approximate) is recommended above inv # https://vene.ro/blog/inverses-pseudoinverses-numerical-issues-spee # d-symmetry.html # but better to solve with eye # p_cov = np.array(np.linalg.pinv(arg) * err_var) # arg_inv = np.linalg.pinv(arg) # else: # try: # arg_inv = np.linalg.lstsq(arg, np.eye(nobs), rcond=None)[0] # # except MemoryError: # print('Try calc_cov = False and p_cov = np.diag(p_var); ' # 'And neglect the covariances.') # arg_inv = np.linalg.lstsq(arg, np.eye(nobs), rcond=None)[0] p_cov = np.array(arg_inv * err_var) p_var = np.diagonal(p_cov) assert np.all(p_var >= 0), 'Unable to invert the matrix' + str(p_var) return p_sol, p_var, p_cov else: p_var = out_sol[-1] * err_var # normalized covariance return p_sol, p_var
5,342,014
def _paginate(api, paginated_object):
    """
    The autogenerated client does not support pagination. This function returns
    a generator over all items of the array that the paginated object
    `paginated_object` is part of.
    """
    yield from paginated_object.values
    typename = type(paginated_object).__name__
    auth_settings = ["apiKeyHeader", "apiKeyQuery"]
    while paginated_object.next_link:
        link = paginated_object.next_link[len(api.api_client.configuration.host):]
        paginated_object, status, headers = api.api_client.call_api(
            link, "GET", response_type=typename, auth_settings=auth_settings)
        if status == 200:
            yield from paginated_object.values
        else:
            raise Exception(f"could not receive paginated data: status {status}")
5,342,015
def register_module(): """Registers this module for use.""" def on_module_disable(): tags.Registry.remove_tag_binding(MathTag.binding_name) def on_module_enable(): tags.Registry.add_tag_binding(MathTag.binding_name, MathTag) global_routes = [ (RESOURCES_URI + '/.*', tags.ResourcesHandler), (MATHJAX_URI + '/(fonts/.*)', sites.make_zip_handler(os.path.join( appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-fonts-2.3.0.zip'))), (MATHJAX_URI + '/(.*)', sites.make_zip_handler(os.path.join( appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-2.3.0.zip')))] namespaced_routes = [] global custom_module # pylint: disable=global-statement custom_module = custom_modules.Module( 'Mathematical Formula Display', 'Provides a custom tag to embed mathematical formulas using TeX or MML.' , global_routes, namespaced_routes, notify_module_disabled=on_module_disable, notify_module_enabled=on_module_enable) return custom_module
5,342,016
def sobel_gradients(source: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """ Computes partial derivatives to detect angle gradients. """
    grad_x = generic_filter(source, np.matrix([
        [1, 0, -1],
        [2, 0, -2],
        [1, 0, -1]]
    ))
    grad_y = generic_filter(source, np.matrix([
        [1, 2, 1],
        [0, 0, 0],
        [-1, -2, -1]]
    ))

    def normalize_angle(x: float) -> int:
        x = round(x % 180)
        if x >= 0 and x <= 22.5:
            return 0
        elif x > 22.5 and x <= 67.5:
            return 45
        elif x > 67.5 and x <= 112.5:
            return 90
        elif x > 112.5 and x <= 157.5:
            return 135
        elif x > 157.5 and x <= 180:
            return 0

    # arctan2 returns radians; convert to degrees so the 22.5-degree buckets above apply
    thetas = np.degrees(np.arctan2(grad_y, grad_x))
    thetas = np.vectorize(normalize_angle)(thetas)
    grads = np.hypot(grad_y, grad_x)
    return grads, thetas
5,342,017
def flat_dict(d, prefix=""):
    """
    Loop through dictionary d.
    Append any key, val pairs to the return dict ret.
    Add the prefix to any key param.
    Recurse if the encountered value is a nested dictionary.
    """
    if not isinstance(d, Mapping):
        return d
    ret = {}
    for key, val in d.items():
        if isinstance(val, Mapping):
            ret = {**ret, **flat_dict(val, prefix=prefix + str(key) + "_")}
        else:
            ret[prefix + str(key)] = val
    return ret
5,342,018
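A minimal usage sketch of `flat_dict` above, assuming a small nested dict (the example input is hypothetical):

# Nested keys are joined with "_" into a single flat level.
nested = {"model": {"lr": 0.01, "layers": {"hidden": 64}}, "seed": 42}
flat = flat_dict(nested)
# -> {"model_lr": 0.01, "model_layers_hidden": 64, "seed": 42}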
def feature_normalization(train, test):
    """Rescale the data so that each feature in the training set is in
    the interval [0,1], and apply the same transformations to the test
    set, using the statistics computed on the training set.

    Args:
        train - training set, a 2D numpy array of size (num_instances, num_features)
        test - test set, a 2D numpy array of size (num_instances, num_features)

    Returns:
        train_normalized - training set after normalization
        test_normalized - test set after normalization
    """
    # Minimal sketch of the missing body (the original only contained a TODO):
    # min-max scale each feature using the training-set statistics.
    train_min = train.min(axis=0)
    train_range = train.max(axis=0) - train_min
    train_range[train_range == 0] = 1  # avoid division by zero for constant features
    train_normalized = (train - train_min) / train_range
    test_normalized = (test - train_min) / train_range
    return train_normalized, test_normalized
5,342,019
def _get_filehandler_with_formatter(logname, formatter=None):
    """ Return a logging FileHandler for given logname using a given
    logging formatter

    :param logname: Name of the file where logs will be stored, ".log"
        extension will be added
    :param formatter: An instance of logging.Formatter or None if the
        default should be used
    :return:
    """
    handler = logging.FileHandler(logname)
    if formatter is not None:
        handler.setFormatter(formatter)
    return handler
5,342,020
def extract_and_save(file_path: str, start: int = 0, stop: int = -1, interval: int = 1):
    """Extract frames and then save them to a directory.

    Args:
        file_path (str): Path to the video file.
        start (int, optional): Index of the first frame to extract. Defaults to 0.
        stop (int, optional): Index of the last frame to extract (-1 means until the end). Defaults to -1.
        interval (int, optional): Keep every `interval`-th frame. Defaults to 1.
    """
    cap = cv2.VideoCapture(file_path)
    video_dir, video_name = os.path.split(file_path)
    frames_dir = f"{video_dir}/frames_{video_name[:-4]}"
    frames = extract_frames(cap, start, stop, interval)
    save_images(frames_dir, frames)
5,342,021
def gen_data_tensors( df: pd.DataFrame, lag: int = 6, batch_size: int = 32, validation_ratio: float = 0.2 ) -> (DataLoader, DataLoader, TensorDataset, TensorDataset): """ Primary goal: create dataloader object. """ x_train, y_train = generate_supervised(df, lag=lag) # Transform DataFrame to NumpyArray. x_train, y_train = map(lambda x: x.values, (x_train, y_train)) # Generating Validation Set. x_train, x_val, y_train, y_val = train_test_split( x_train, y_train, test_size=validation_ratio, shuffle=True ) # Transform to Tensor x_train, y_train, x_val, y_val = map( torch.tensor, (x_train, y_train, x_val, y_val) ) assert batch_size <= x_train.shape[0] and batch_size <= x_val.shape[0],\ "Batch size cannot be greater than number of training instances." train_ds = TensorDataset(x_train, y_train) train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True) val_ds = TensorDataset(x_val, y_val) val_dl = DataLoader(val_ds, batch_size=batch_size, shuffle=True) return train_dl, val_dl, train_ds, val_ds
5,342,022
def staff_for_site(): """ Used by the Req/Req/Create page - note that this returns Person IDs """ try: site_id = request.args[0] except: result = current.xml.json_message(False, 400, "No Site provided!") else: table = s3db.hrm_human_resource ptable = db.pr_person query = (table.site_id == site_id) & \ (table.deleted == False) & \ (table.status == 1) & \ ((table.end_date == None) | \ (table.end_date > request.utcnow)) & \ (ptable.id == table.person_id) rows = db(query).select(ptable.id, ptable.first_name, ptable.middle_name, ptable.last_name, orderby=ptable.first_name) result = [] append = result.append for row in rows: append({"id" : row.id, "name" : s3_fullname(row) }) result = json.dumps(result) response.headers["Content-Type"] = "application/json" return result
5,342,023
def element_z(sym_or_name):
    """Convert element symbol or name into a valid element atomic number Z.

    Args:
        sym_or_name: string type representing an element symbol or name.

    Returns:
        Integer z that is a valid atomic number matching the symbol or name.

    Raises:
        ElementZError: if the symbol or name cannot be converted.
    """
    try:
        return _Z_FROM_SYMBOL[validated_symbol(sym_or_name)]
    except ElementSymbolError:
        pass
    try:
        return _Z_FROM_NAME[validated_name(sym_or_name)]
    except ElementNameError:
        raise ElementZError("Must supply either the element symbol or name")
5,342,024
def to_int(s: str) -> Tuple[bool, int]:
    """Convert a string s to an int, if possible."""
    try:
        n = int(s)
        return True, n
    except Exception:
        return False, 0
5,342,025
def out2garf(outcar='OUTCAR', poscar='POSCAR', nframes=100, intv=5,\ refstructure=['POSCAR', '0.0'], samplename='POSCAR'): """ """ # read POSCAR fname, scaling, lattice, \ symbols, numbers, atoms, refpos, fixes = read_poscar(poscar) natoms = np.sum(numbers) lx, ly, lz, alpha, beta, gamma, trans_matrix = calc_trans(lattice) # read OUTCAR frames, energies = read_outcar(outcar, natoms, nframes) # adjust positiosn and get cartesian coordinates dirposes = [] cartposes = [] for frame in frames: dirpos = dot(frame[0], inv(lattice.T)) dirpos, refpos = adjust_poses(dirpos, refpos) dirposes.append(dirpos) cartposes.append(dot(dirpos, lattice)) cartposes = np.array(cartposes) # write geo names = [] geo_content = '' for i, cartpos in enumerate(cartposes): if i%intv == 0: name = samplename + '_' + str(i+1).zfill(4) coords = (dot(trans_matrix, cartpos.T)).T geo_content += write_biogrf(name, lx, ly, lz, \ alpha, beta, gamma, atoms, coords) + '\n' names.append(name) refposcar, refenergy = refstructure[0], float(refstructure[1]) if not (isinstance(refposcar, str) and isinstance(refenergy, float)): raise ValueError('First must be POSCAR path and second must be energy.') fname, scaling, lattice, \ symbols, numbers, atoms, refpos, fixes = read_poscar(refposcar) lx, ly, lz, alpha, beta, gamma, trans_matrix = calc_trans(lattice) refname = samplename + '_REF' cartpos = dot(refpos, lattice) coords = (dot(trans_matrix, cartpos.T)).T ref_content = write_biogrf(refname, lx, ly, lz, \ alpha, beta, gamma, atoms, coords) geo_content = ref_content + '\n' + geo_content with open('geo', 'w') as writer: writer.write(geo_content) print('Successfully write geo ...') # write trainset.in file energies = energies - refenergy tsi_content = write_trainsetin(names, energies, refname) with open('trainset.in', 'w') as writer: writer.write(tsi_content) print('Successfully write trainset.in ...')
5,342,026
def simple_computation(maximum_terms:int=None, configuration_of=None): """ Simple 4-operand computations 移除了分数项,因为除法运算会表示为分数 禁用了括号(random_term的expression参数),因为会导致溢出 :return: Problem object """ if not configuration_of: configuration_of = 'simple_computation' func_config = combine_configurations(type_config[configuration_of], global_config) if maximum_terms: func_config['maximum_terms'] = maximum_terms func_config['symbol'] = process_raw_symbol(func_config['symbol']) number_of_terms = randint(2, func_config['maximum_terms']) random_term_kwargs = {'interval':func_config['interval'], 'denominator_interval': func_config['denominator_interval'], 'float_precision': func_config['float_precision'], 'frac': False, 'expression': False, 'symbol': func_config['symbol']} str_question = str(random_term(**random_term_kwargs)) for term_number in range(number_of_terms): # operand term str_question += choice(['+', '-', '*', '/'])+str(random_term(**random_term_kwargs)) answer = sympify(str_question) if func_config['symbol'] else sympify(str_question).round(func_config['float_precision']) question = sympify(str_question, evaluate=False) problem = Problem(question, answer) return problem
5,342,027
def add_torrents_from_folder(path, transmission_url, torrentleech_username,
                             torrentleech_password, torrentleech_rss_key):
    """Console script for media_server_utils."""
    core.add_torrents_from_folder(path, transmission_url, torrentleech_username,
                                  torrentleech_password, torrentleech_rss_key)
    return 0
5,342,028
def cut_graph(G, w):
    """
    Cut a graph down to a given depth

    Inputs:
        - G     Input graph
        - w     Depth to cut to

    Output:
        - cut_G Cut graph
    """
    # Copy the initial graph and get the number of nodes
    cut_G = G.copy()
    N = len(G.nodes)

    # Check all nodes
    for i in range(N):
        # If the depth is greater than w, remove the node
        if nx.shortest_path_length(G, source=0, target=i) > w:
            cut_G.remove_node(i)

    return cut_G
5,342,029
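A minimal usage sketch of `cut_graph` above, assuming `networkx` is imported as `nx` and nodes are labelled 0..N-1 with node 0 as the root:

import networkx as nx

# A path graph 0-1-2-3-4 cut to depth 2 keeps nodes 0, 1, 2.
G = nx.path_graph(5)
trimmed = cut_graph(G, 2)
print(sorted(trimmed.nodes))  # [0, 1, 2]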
def archive_filter_search(articles_qs):
    """
    Filter the article queryset and send it back to the hook for rendering.
    """
    return articles_qs.exclude(updates__article__stage=STAGE_PUBLISHED)
5,342,030
def init_plotscript(config, markets: List, startup_candles: int = 0): """ Initialize objects needed for plotting :return: Dict with candle (OHLCV) data, trades and pairs """ if "pairs" in config: pairs = expand_pairlist(config['pairs'], markets) else: pairs = expand_pairlist(config['exchange']['pair_whitelist'], markets) # Set timerange to use timerange = TimeRange.parse_timerange(config.get('timerange')) data = load_data( datadir=config.get('datadir'), pairs=pairs, timeframe=config['timeframe'], timerange=timerange, startup_candles=startup_candles, data_format=config.get('dataformat_ohlcv', 'json'), ) if startup_candles and data: min_date, max_date = get_timerange(data) logger.info(f"Loading data from {min_date} to {max_date}") timerange.adjust_start_if_necessary(timeframe_to_seconds(config['timeframe']), startup_candles, min_date) no_trades = False filename = config.get('exportfilename') if config.get('no_trades', False): no_trades = True elif config['trade_source'] == 'file': if not filename.is_dir() and not filename.is_file(): logger.warning("Backtest file is missing skipping trades.") no_trades = True try: trades = load_trades( config['trade_source'], db_url=config.get('db_url'), exportfilename=filename, no_trades=no_trades, strategy=config.get('strategy'), ) except ValueError as e: raise OperationalException(e) from e if not trades.empty: trades = trim_dataframe(trades, timerange, 'open_date') return {"ohlcv": data, "trades": trades, "pairs": pairs, "timerange": timerange, }
5,342,031
def get_one(data: List[LogEntry], filterfun: Callable) -> LogEntry:
    """Get a single entry and assert that after filtering only a single entry remains."""
    filtered = list(filter(filterfun, data))
    if len(filtered) != 1:
        raise ValueError(f"Entries not unique after filtering: {filtered}")
    return filtered[0]
5,342,032
def parse_ignorelist(f): # type: (IO[Text]) -> Tuple[Ignorelist, Set[Text]] """ Parse the ignorelist file given by `f`, and return the parsed structure. :returns: a tuple of an Ignorelist and a set of files that are completely skipped by the linter (i.e. have a '*' entry). """ data = defaultdict(lambda:defaultdict(set)) # type: Ignorelist skipped_files = set() # type: Set[Text] for line in f: line = line.strip() if not line or line.startswith("#"): continue parts = [item.strip() for item in line.split(":")] if len(parts) == 2: error_types_s, file_match = parts line_number = None # type: Optional[int] else: error_types_s, file_match, line_number_s = parts line_number = int(line_number_s) error_types = {item.strip() for item in error_types_s.split(",")} file_match = os.path.normcase(file_match) if "*" in error_types: skipped_files.add(file_match) else: for error_type in error_types: data[error_type][file_match].add(line_number) return data, skipped_files
5,342,033
async def async_setup_entry(opp, config_entry, async_add_entities):
    """Add binary sensors for a config entry."""
    broker = opp.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
    sensors = []
    for device in broker.devices.values():
        for capability in broker.get_assigned(device.device_id, "binary_sensor"):
            attrib = CAPABILITY_TO_ATTRIB[capability]
            sensors.append(SmartThingsBinarySensor(device, attrib))
    async_add_entities(sensors)
5,342,034
def create(env_name: str,
           episode_length: int = 1000,
           action_repeat: int = 1,
           auto_reset: bool = True,
           batch_size: Optional[int] = None,
           **kwargs) -> Env:
    """Creates an Env with a specified brax system."""
    env = _envs[env_name](**kwargs)
    if episode_length is not None:
        env = wrappers.EpisodeWrapper(env, episode_length, action_repeat)
    if batch_size:
        env = wrappers.VectorWrapper(env, batch_size)
    if auto_reset:
        env = wrappers.AutoResetWrapper(env)
    return env  # type: ignore
5,342,035
def from_software_version(software_version):
    """
    Returns the product-version-dependent limits constants.

    This is based on the software version running on the product and can
    change depending on when you ask a cluster if it is upgrading.

    Args:
        software_version: (str) software version, e.g. "3.1.2.0" or "2.2.7"
    """
    return _get_limits(software_version=software_version)
5,342,036
def _from_parse_feature(parse_feature): """Convert a single feature spec to a ColumnSchema.""" # FixedLenFeature if isinstance(parse_feature, tf.FixedLenFeature): representation = FixedColumnRepresentation(parse_feature.default_value) return ColumnSchema(parse_feature.dtype, parse_feature.shape, representation) # FixedLenSequenceFeature if isinstance(parse_feature, tf.FixedLenSequenceFeature): raise ValueError('DatasetSchema does not support ' 'FixedLenSequenceFeature yet.') # VarLenFeature if isinstance(parse_feature, tf.VarLenFeature): representation = ListColumnRepresentation() return ColumnSchema(parse_feature.dtype, [None], representation) # SparseFeature if isinstance(parse_feature, tf.SparseFeature): index_field = SparseIndexField(name=parse_feature.index_key, is_sorted=parse_feature.already_sorted) representation = SparseColumnRepresentation( value_field_name=parse_feature.value_key, index_fields=[index_field]) return ColumnSchema(parse_feature.dtype, [parse_feature.size], representation) raise ValueError('Cannot interpret feature spec {} with type {}'.format( parse_feature, type(parse_feature)))
5,342,037
def conditional_patch_resource( service_account_json, base_url, project_id, cloud_region, dataset_id, fhir_store_id ): """ If a resource is found based on the search criteria specified in the query parameters, updates part of that resource by applying the operations specified in a JSON Patch document. """ url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region) # The search query in this request updates all Observations # if the subject of the Observation is a particular patient. resource_path = "{}/datasets/{}/fhirStores/{}/fhir/Observation".format( url, dataset_id, fhir_store_id ) # Make an authenticated API request session = get_session(service_account_json) headers = {"Content-Type": "application/json-patch+json"} body = json.dumps( [ { "op": "replace", "path": "/valueQuantity/value", # Sets the BPM for all matching Observations to 80. This # is the portion of the request being patched. "value": 80, } ] ) # The search query is passed in as a query string parameter. params = {"identifier": "my-code-system|ABC-12345"} response = session.patch(resource_path, headers=headers, params=params, data=body) response.raise_for_status() print(response.url) resource = response.json() print( "Conditionally patched all Observations with the " "identifier 'my-code-system|ABC-12345' to use a BPM of 80." ) print(json.dumps(resource, indent=2)) return resource
5,342,038
def setup_virtualenvs(recreate_virtualenvs=False): """ Setup Python virtual environments for all the registered or the provided pack. """ LOG.info("=========================================================") LOG.info("########### Setting up virtual environments #############") LOG.info("=========================================================") pack_dir = cfg.CONF.register.pack fail_on_failure = not cfg.CONF.register.no_fail_on_failure registrar = ResourceRegistrar() if pack_dir: pack_name = os.path.basename(pack_dir) pack_names = [pack_name] # 1. Register pack registrar.register_pack(pack_name=pack_name, pack_dir=pack_dir) else: # 1. Register pack base_dirs = content_utils.get_packs_base_paths() registrar.register_packs(base_dirs=base_dirs) # 2. Retrieve available packs (aka packs which have been registered) pack_names = registrar.get_registered_packs() if recreate_virtualenvs: """ update = False: this is more than an update of an existing virtualenv the virtualenv itself will be removed & recreated this is i.e. useful for updates to a newer Python release """ update = False else: """ update = True: only dependencies inside the virtualenv will be updated """ update = True setup_count = 0 for pack_name in pack_names: try: setup_pack_virtualenv(pack_name=pack_name, update=update, logger=LOG) except Exception as e: exc_info = not fail_on_failure LOG.warning( 'Failed to setup virtualenv for pack "%s": %s', pack_name, e, exc_info=exc_info, ) if fail_on_failure: raise e else: setup_count += 1 LOG.info("Setup virtualenv for %s pack(s)." % (setup_count))
5,342,039
def cal_big_F(p, f):
    """
    Calculate finite strain big F for linearized form.
    Not fully tested.

    :param p: pressure
    :param f: small f
    :return: big F
    """
    return p / (3. * f * np.power((1. + 2. * f), 2.5))
5,342,040
def multiply_aug(data_aug: List[str], factor: int) -> List[str]: """ The original idea here was to use to to speed up some vasp calculations for supercells by initializing the entire CHGCAR file. The current code does not deal with transformation of the Augemetation charges after regridding. This is a naive way to multiply the Augmentation data in the CHGCAR, a real working implementation will require analysis of the PAW projection operators. However, even with such an implementation, the speed up will be minimal due to VASP's interal minimization algorithms. Args: data_aug: The original augmentation data from a CHGCAR factor: The multiplication factor (some integer number of times it gets repeated) Returns: List of strings for each line of the Augmentation data. """ res = [] # type: List[str] cur_block = [] # type: List[str] cnt = 0 for ll in data_aug: if "augmentation" in ll: if cur_block: for j in range(factor): cnt += 1 cur_block[ 0 ] = f"augmentation occupancies{cnt:>4}{cur_block[0].split()[-1]:>4}\n" res.extend(cur_block) cur_block = [ll] else: cur_block.append(ll) else: for j in range(factor): cnt += 1 cur_block[ 0 ] = f"augmentation occupancies{cnt:>4}{cur_block[0].split()[-1]:>4}\n" res.extend(cur_block) return res
5,342,041
def _parse_instance_chain(chain_str):
    """Return the instance/attribute object resolved from the object chain."""
    chain = chain_str.split('.')
    instance_name = chain.pop(0)
    attr = session['instances'][instance_name]
    for attr_name in chain:
        attr = getattr(attr, attr_name)
    return attr
5,342,042
def epochs_lists( draw, start_time=math.inf, max_epochs=5, min_deme_size=FLOAT_EPS, max_deme_size=FLOAT_MAX, ): """ A hypothesis strategy for creating lists of Epochs for a deme. :param float start_time: The start time of the deme. :param int max_epochs: The maximum number of epochs in the list. """ assert max_epochs >= 2 times = draw( st.lists( st.floats( min_value=0, max_value=min(FLOAT_MAX, start_time), exclude_max=True, width=32, ), unique=True, min_size=1, max_size=max_epochs, ) ) times.sort(reverse=True) epochs = [] for i, end_time in enumerate(times): start_size = draw(st.floats(min_value=min_deme_size, max_value=max_deme_size)) if i == 0 and math.isinf(start_time): end_size = start_size else: end_size = draw(st.floats(min_value=min_deme_size, max_value=max_deme_size)) cloning_rate = draw(st.floats(min_value=0, max_value=1)) selfing_rate = draw(st.floats(min_value=0, max_value=prec32(1 - cloning_rate))) epochs.append( dict( end_time=end_time, start_size=start_size, end_size=end_size, cloning_rate=cloning_rate, selfing_rate=selfing_rate, ) ) return epochs
5,342,043
def mlp_layers(nch_input, nch_layers, b_shared=True, bn_momentum=0.1, dropout=0.0):
    """ [B, Cin, N] -> [B, Cout, N] or [B, Cin] -> [B, Cout] """
    layers = []
    last = nch_input
    for i, outp in enumerate(nch_layers):
        if b_shared:
            weights = torch.nn.Conv1d(last, outp, 1)
        else:
            weights = torch.nn.Linear(last, outp)
        layers.append(weights)
        layers.append(torch.nn.BatchNorm1d(outp, momentum=bn_momentum))
        layers.append(torch.nn.ReLU())
        if not b_shared and dropout > 0.0:
            layers.append(torch.nn.Dropout(dropout))
        last = outp
    return layers
5,342,044
def get_imagenet_iterator(root, batch_size, num_workers, data_shape=224, dtype="float32"): """Dataset loader with preprocessing.""" train_dir = os.path.join(root, "train") train_transform, val_transform = get_imagenet_transforms(data_shape, dtype) logging.info("Loading image folder %s, this may take a bit long...", train_dir) train_dataset = ImageFolderDataset(train_dir, transform=train_transform) train_data = DataLoader( train_dataset, batch_size, shuffle=True, last_batch="discard", num_workers=num_workers ) val_dir = os.path.join(root, "val") if not os.path.isdir(os.path.expanduser(os.path.join(root, "val", "n01440764"))): user_warning = ( "Make sure validation images are stored in one subdir per category, a helper script is" " available at https://git.io/vNQv1" ) raise ValueError(user_warning) logging.info("Loading image folder %s, this may take a bit long...", val_dir) val_dataset = ImageFolderDataset(val_dir, transform=val_transform) val_data = DataLoader(val_dataset, batch_size, last_batch="keep", num_workers=num_workers) return DataLoaderIter(train_data, dtype), DataLoaderIter(val_data, dtype)
5,342,045
def path_content_to_string(path): """Convert contents of a directory recursively into a string for easier comparison.""" lines = [] prefix_len = len(path + sep) for root, dirs, files in walk(path): for dir_ in dirs: full_path = join(root, dir_) relative_path = full_path[prefix_len:] size = 0 type_ = "dir" hash_ = "0" line = "{},{},{},{}".format(relative_path, type_, size, hash_) lines.append(line) for filename in files: full_path = join(root, filename) relative_path = full_path[prefix_len:] size = getsize(full_path) type_ = "file" if isfile(full_path) else "dir" hash_ = get_md5(full_path) line = "{},{},{},{}".format(relative_path, type_, size, hash_) lines.append(line) lines = sorted(lines) return "\n".join(lines)
5,342,046
def get_evts(rslt, a_params): """Return start and end times of candidate replay events.""" # get PC firing rates ## PC spks spks_pc = rslt.spks[:, :rslt.p['N_PC']] ## smoothed instantaneous firing rate avg'd over PCs fr_pc = smooth(spks_pc.sum(axis=1) / (rslt.dt * rslt.p['N_PC']), a_params['SMOOTH_FR']) # get start and end time idxs when PC FR is above threshold starts, ends = get_segments(fr_pc >= a_params['EVT_DTCN_TH']) # convert to time starts = starts.astype(float) * rslt.dt ends = ends.astype(float) * rslt.dt # remove too-short gaps btwn events if len(starts) > 0: starts, ends = remove_short_gaps(starts, ends, a_params['MIN_GAP_DUR']) # remove too-short events if len(starts) > 0: starts, ends = remove_short_evts(starts, ends, a_params['MIN_EVT_DUR']) # remove all events that start before min start time if len(starts): mask = starts > a_params['MIN_START'] starts = starts[mask] ends = ends[mask] # remove final event if it hits end of smln if len(ends) and ends[-1] >= rslt.ts[-1]: starts = starts[:-1] ends = ends[:-1] return starts, ends
5,342,047
def plot_many_saliency_maps( saliency_matrix, axes_objects_2d_list, colour_map_object, max_absolute_contour_level, contour_interval, line_width=2): """Plots 2-D saliency map for each predictor. M = number of rows in grid N = number of columns in grid C = number of predictors :param saliency_matrix: M-by-N-by-C numpy array of saliency values. :param axes_objects_2d_list: See doc for `_init_figure_panels`. :param colour_map_object: See doc for `plot_saliency_2d`. :param max_absolute_contour_level: Same. :param max_absolute_contour_level: Same. :param contour_interval: Same. :param line_width: Same. """ num_predictors = saliency_matrix.shape[-1] num_panel_rows = len(axes_objects_2d_list) num_panel_columns = len(axes_objects_2d_list[0]) for m in range(num_predictors): this_panel_row, this_panel_column = numpy.unravel_index( m, (num_panel_rows, num_panel_columns) ) plot_saliency_2d( saliency_matrix=saliency_matrix[..., m], axes_object=axes_objects_2d_list[this_panel_row][this_panel_column], colour_map_object=colour_map_object, max_absolute_contour_level=max_absolute_contour_level, contour_interval=contour_interval, line_width=line_width)
5,342,048
def get_threatfeed_command(client: Client, threatfeed_id: int = None): """ Retrieves the current list of threatFeed objects already configured in the system :param threatfeed_id: The id of the ThreatFeed object. :param client: Vectra Client """ raw_response = client.http_request(url_suffix=f'threatFeeds/{threatfeed_id}' if threatfeed_id else 'threatFeeds') count = demisto.get(raw_response, 'meta.count') if count == 0: return "Couldn't find any results", {}, raw_response res = raw_response.get('threatFeeds') # type: ignore feeds: List[Dict] = [res] if not isinstance(res, List) else sorted(res, key=lambda h: h.get('id')) # type: ignore for feed in feeds: feed.update(feed.get('defaults')) # type: ignore headers = ['id', 'name', 'certainty', 'category', 'duration', 'indicatorType'] readable_output = tableToMarkdown(name='Rules table', t=feeds, headers=headers) context = [] for feed in feeds: context.append(createContext( { 'ID': feed.get('id'), 'Name': feed.get('name'), 'Duration': feed.get('duration'), 'Category': feed.get('category'), 'Certainty': feed.get('certainty'), 'Data': feed.get('data'), 'IndicatorType': feed.get('indicatorType'), }, removeNull=True) ) outputs = {'Vectra.ThreatFeed(val.ID==obj.ID)': context} return readable_output, outputs, raw_response
5,342,049
def email_subscribe_pending_confirm(hexdomain): """Send a confirmation email for a user.""" domain = tools.parse_domain(hexdomain) if domain is None: flask.abort(400, 'Malformed domain or domain not represented in hexadecimal format.') hide_noisy = bool(flask.request.form.get('hide_noisy')) email_address = flask.request.form['email_address'] if email_address.strip() == '': return flask.redirect('/email/subscribe/{}/0?hide_noisy={}'.format( hexdomain, hide_noisy )) verify_code = tools.random_id() verify_url = flask.request.url_root + 'email/verify/{}'.format(verify_code) email_body = email_tools.render_email( 'confirm.html', domain=domain, verify_url=verify_url ) repository.propose_subscription( verify_code, email_address, domain, hide_noisy ) emailer.send( email_address, 'Please verify your subscription', email_body ) return flask.render_template('www/email/pending_verify.html', domain=domain)
5,342,050
def main():
    """Manage VM snapshots: create, revert, destroy, or list them."""
    args = get_args()
    si = vmware_lib.connect(args.host, args.user, args.password, args.port, args.insecure)
    content = si.RetrieveContent()
    if args.folder:
        folder = vmware_lib.get_obj(content, [vmware_lib.vim.Folder], args.folder)
        for vm in folder.childEntity:
            if vm.name == args.vm_name:
                vm_object = vm
    else:
        vm_object = vmware_lib.get_obj(content, [vmware_lib.vim.VirtualMachine], args.vm_name)
    if vm_object is None:
        print('Cannot find {}'.format(args.vm_name))
        return
    if args.snapshot_name:
        print("Creating snapshot {} for {}".format(args.snapshot_name, args.vm_name))
        take_vm_snapshot(si, vm_object, args.snapshot_name)
    if args.revert:
        print("Reverting to latest snapshot for {}".format(args.vm_name))
        revert_to_latest_snapshot(si, vm_object)
        tasks.wait_for_tasks(si, [vm_object.PowerOn()])
    if args.destroy_all:
        print("Destroying snapshots for {}".format(args.vm_name))
        task = vm_object.RemoveAllSnapshots_Task()
        if args.wait_for_task:
            vmware_lib.wait_for_task(task)
            print('All snapshots for {} have been destroyed'.format(args.vm_name))
    if args.list_snapshots:
        try:
            snaps = vm_object.snapshot.rootSnapshotList
            for snap in snaps:
                print(snap)
        except AttributeError:
            print('No snapshots found for {}'.format(args.vm_name))
5,342,051
def create_zip_hsa_hrr_crosswalk():
    """Creates the crosswalk table from ZIP to HSA and from ZIP to HRR from source."""
    zipped_csv = ZipFile(BytesIO(requests.get(ZIP_HSA_HRR_URL).content))
    zip_df = pd.read_csv(zipped_csv.open(ZIP_HSA_HRR_FILENAME))

    # Build the HSA table
    hsa_df = zip_df[["zipcode18", "hsanum"]].rename(
        columns={"zipcode18": "zip", "hsanum": "hsa"}
    )

    # Build the HRR table
    hrr_df = zip_df[["zipcode18", "hrrnum"]].rename(
        columns={"zipcode18": "zip", "hrrnum": "hrr"}
    )

    # Convert to zero-padded strings
    hrr_df["zip"] = hrr_df["zip"].astype(str).str.zfill(5)
    hrr_df["hrr"] = hrr_df["hrr"].astype(str)
    hsa_df["zip"] = hsa_df["zip"].astype(str).str.zfill(5)
    hsa_df["hsa"] = hsa_df["hsa"].astype(str)

    hsa_df.to_csv(join(OUTPUT_DIR, ZIP_HSA_OUT_FILENAME), index=False)
    hrr_df.to_csv(join(OUTPUT_DIR, ZIP_HRR_OUT_FILENAME), index=False)
5,342,052
def set_state(state='stop', profile_process='worker'):
    """Set up the profiler state to 'run' or 'stop'.

    Parameters
    ----------
    state : string, optional
        Indicates whether to run the profiler, can be 'stop' or 'run'.
        Default is `stop`.
    profile_process : string
        whether to profile kvstore `server` or `worker`.
        server can only be profiled when kvstore is of type dist.
        if this is not passed, defaults to `worker`
    """
    state2int = {'stop': 0, 'run': 1}
    profile_process2int = {'worker': 0, 'server': 1}
    check_call(_LIB.MXSetProcessProfilerState(ctypes.c_int(state2int[state]),
                                              profile_process2int[profile_process],
                                              profiler_kvstore_handle))
5,342,053
def getTrackIds(sp, username, playlist, offset=0):
    """
    Returns the ids of the tracks contained in a playlist.

    :param sp: A spotipy.Spotify object to be used for the request.
    :param username: The username of the user whose playlists you want to retrieve.
    :param playlist: The playlist from which the tracks are retrieved.
    :param offset: Do not worry about this parameter, it is used for recursion.
    :returns: A list containing all the ids of the tracks that are in the playlist.
    """
    limit = 100
    fields = "items(track(id)), total"
    api_response = sp.user_playlist_tracks(username, playlist["id"], fields,
                                           limit=limit, offset=offset)
    track_ids = [x["track"]["id"] for x in api_response["items"]]
    if api_response["total"] > limit + offset:
        next_page = getTrackIds(sp, username, playlist, offset + limit)
        for item in next_page:
            track_ids.append(item)
    return track_ids
5,342,054
def children_of_head(element: Element):
    """
    get children elements of the head element
    :param element:
    :return:
    """
    if element is None:
        return []
    head_xpath = '//head'
    head_element = element.xpath(head_xpath)
    if head_element:
        head_element.__class__ = Element
        return descendants(head_element, True)
    return []
5,342,055
def network(name, nodes):
    """nodes: [ NodeMeta, ... ]"""
    return NetworkMeta(name=name, nodes=nodes)
5,342,056
def condensed_to_cosine(condensed_format):
    """Get mhd direction cosine for this condensed format axis"""
    axis = Axis.from_condensed_format(condensed_format)
    return permutation_to_cosine(axis.dim_order, axis.dim_flip)
5,342,057
def get_plants_for_species(item): """Get list of plants for a species.""" if item is None or not item or item['name'] is None: return @cached('species_list_{}.json'.format(item['name']), directory='../../data/wikipedia') def get(): def table(dom): # We need to switch to table format - the wikipedia articles # are inconsistent. rows = dom.find('.mw-parser-output .wikitable tr') if not rows: return headings = [h.text.strip() for h in rows[0]] for row in rows[1:]: row_data = {} tds = row.findall('td') if tds is None: continue for i, td in enumerate(tds): try: row_data[headings[i]] = td.text or None except IndexError: continue data.append(row_data) data = [] url = 'https://en.wikipedia.org{}'.format(item['link']) _, dom = get_dom(url) # Try to be specific, but broaden scope if none found. if 'bamboo' in item['name']: table(dom) else: links = dom.find('.mw-parser-output ul li a') if not links: links = dom.find('.mw-parser-output ol li a') if not links: links = dom.find('.mw-parser-output li a') if links: for link in links: if link.text is None: continue # Reference links embedded within the lists. if any([ # External link is invalid link.get('href', '').startswith('http'), # Anchors, invalid link link.get('href', '').startswith('#'), # Not real links/text link.text.startswith('['), link.text == '^', link.text.startswith('\\'), ]): continue data.append(dict(name=link.text, link=link.get('href'))) else: table(dom) return data return get()
5,342,058
def check_password_hash(password, password_hash, salt, N=1 << 14, r=8, p=1, buflen=64):
    """
    Given a password, hash, and salt, this function verifies that the password
    matches the given hash/salt.

    Args:
        - ``password``: The password to perform the check on.

    Returns:
        - ``bool``
    """
    candidate_hash = generate_password_hash(password, salt, N, r, p, buflen)
    return safe_str_cmp(password_hash, candidate_hash)
5,342,059
def _dB_calc(J_field, x, y, z):
    """
    Calculates the magnetic field at a point due to a current.

    Args:
        J_field (VectorField): Vector field describing the current that the
            magnetic field is generated from.
        x: The x coordinate of the point in the magnetic field.
        y: The y coordinate of the point in the magnetic field.
        z: The z coordinate of the point in the magnetic field.

    Returns:
        tuple (u, v, w): A tuple with the magnitude of the magnetic field at
        the point (x, y, z).
    """
    B = (0, 0, 0)
    for coordinates, mag in J_field.vec_field.items():
        biot_savart_constant = 10 ** (-7)
        distance = (x - coordinates[0], y - coordinates[1], z - coordinates[2])
        distanceMag = linalg.norm(distance)
        distanceUnit = (distance[0] / distanceMag, distance[1] / distanceMag, distance[2] / distanceMag)
        crossProduct = np.cross(coordinates, distanceUnit)
        dB = (biot_savart_constant * crossProduct) / (distanceMag ** 2)
        B = np.add(B, dB)
    return B
5,342,060
def fixture_handler() -> Generator[MagicMock, None, None]:
    """Mock all calls, assert calling correct handler"""
    with patch.object(api, "handler") as handler:
        handler.get_all_characters = MagicMock()
        handler.get_inventory = MagicMock()
        handler.character_search = MagicMock()
        yield handler
5,342,061
def make_change(amount, denominations, index=0):
    """
    Write a function that, given:
    1. an amount of money
    2. a list of coin denominations
    computes the number of ways to make the amount of money with coins of the
    available denominations.

    >>> make_change(amount=4, denominations=[1,2,3])
    4
    [1,1,1,1]
    [1,1,2]
    [1,3]
    [2,2]

    >>> make_change(amount=20, denominations=[5, 10])
    3
    [5,5,5,5]
    [5,5,10]
    [10,10]
    """
    # Minimal sketch of the missing body (the original only contained the docstring):
    # count combinations recursively, using only denominations from `index` onwards
    # so that each combination is counted exactly once.
    if amount == 0:
        return 1
    if amount < 0 or index == len(denominations):
        return 0
    with_coin = make_change(amount - denominations[index], denominations, index)
    without_coin = make_change(amount, denominations, index + 1)
    return with_coin + without_coin
5,342,062
def _CaptureStdErr(result_holder, output_message=None, raw_output=None):
    """Update OperationResult either from OutputMessage or plain text."""
    if not result_holder.stderr:
        result_holder.stderr = []
    if output_message:
        if output_message.body:
            result_holder.stderr.append(output_message.body)
        if output_message.IsError():
            result_holder.stderr.append(output_message.error_details.Format())
    elif raw_output:
        result_holder.stderr.append(raw_output)
5,342,063
def create_mpl_subplot(images, color=True):
    """create mpl subplot with all images in list.
    even when the color is set to false it still seems to

    :param images: the list of images to plot
    :type images: cv2 image
    :param color: whether to plot in color or grayscale, defaults to True
    :type color: boolean
    :return: the complete plot
    :rtype: mpl plot
    """
    if not color:
        plt.set_cmap('gray')
    n = math.ceil(math.sqrt(len(images)))
    i = 1
    for img in images:
        plt.subplot(n, n, i)
        plt.imshow(img)
        plt.xticks([]), plt.yticks([])
        i += 1
    return plt
5,342,064
def filter_subclasses(superclass, iter):
    """Returns an iterable of class objects which are subclasses of `superclass`
    filtered from a source iteration.

    :param superclass: The superclass to filter against
    :return: An iterable of classes which are subclasses of `superclass`
    """
    return filter(lambda klass: issubclass(klass, superclass), iter)
5,342,065
def get_database_table_column_name(_conn: psycopg2.extensions.connection, _table: str) -> list: """ Taken from: https://kb.objectrocket.com/postgresql/get-the-column-names-from-a-postgresql-table-with-the-psycopg2-python-adapter-756 # noqa defines a function that gets the column names from a PostgreSQL table. """ # declare an empty list for the column names columns = [] # declare cursor objects from the connection col_cursor = _conn.cursor() # concatenate string for query to get column names # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = 'some_table'; # noqa col_names_str = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE " col_names_str += "table_name = '{}';".format(_table) # print the SQL string # print("\ncol_names_str:", col_names_str) try: sql_object = sql.SQL( # pass SQL statement to sql.SQL() method col_names_str ).format( # pass the identifier to the Identifier() method sql.Identifier(_table) ) # execute the SQL string to get list with col names in a tuple col_cursor.execute(sql_object) # get the tuple element from the liast col_names = (col_cursor.fetchall()) # print list of tuples with column names # print("\ncol_names:", col_names) # iterate list of tuples and grab first element for tup in col_names: # append the col name string to the list columns += [tup[0]] # close the cursor object to prevent memory leaks col_cursor.close() except Exception as err: print("get_columns_names ERROR:", err) # return the list of column names return columns
5,342,066
def retry(*exceptions, retries=3, cooldown=5, verbose=True):
    """
    Decorate an async function to execute it a few times before giving up.
    Hopes that problem is resolved by another side shortly.

    Args:
        exceptions (Tuple[Exception]): The exceptions expected during function execution
        retries (int): Number of retries of function execution.
        cooldown (int): Seconds to wait before retry.
        verbose (bool): Specifies if we should log about not successful attempts.
    """
    def wrap(func):
        @wraps(func)
        async def inner(*args, **kwargs):
            retries_count = 0
            while True:
                try:
                    result = await func(*args, **kwargs)
                except exceptions as err:
                    retries_count += 1
                    if retries_count > retries:
                        raise ValueError(func.__qualname__, args, kwargs) from err
                    if cooldown:
                        await asyncio.sleep(cooldown)
                else:
                    return result
        return inner
    return wrap
5,342,067
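A minimal usage sketch of the `retry` decorator above, assuming `asyncio` is available; the client object and endpoint are hypothetical:

import asyncio

@retry(ConnectionError, retries=2, cooldown=1)
async def fetch_status():
    # Hypothetical flaky call; raises ConnectionError until the service responds.
    return await some_client.get("/status")

# asyncio.run(fetch_status()) retries up to 2 times, sleeping 1s between attempts,
# then raises ValueError chained from the last ConnectionError.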
def process_student(filename_or_URL): """calls mark_student on one student HTML file Creates a BeautifulSoup object and calls mark_student. If the filename_or_URL starts with "https://", attempt to get Firefox cookies before reading from the URL. Parameters: ---------- filename_or_URL: either a local filename, or a URL Returns: -------- return-value of mark_student """ if filename_or_URL[0:8] == "https://": cookiejar=get_cookie_jar() soup=soup_from_URL(filename_or_URL, cookiejar) else: soup=soup_from_file(filename_or_URL) #for q in list_questions(soup): # print(q, "mark=",mark_question(q)) return mark_student(soup)
5,342,068
def find_CH2OH_in_chain(atoms, cycles): """ this function finds terminal CH2OH that C is not in a cycle H ' O(6) ' H ' / R---C(5)---H """ end_carbon_indices = [] end_carbon_indices_atom_list = {} for _ in range(len(atoms)): name = atoms[_].get_atom_name() if name != 'C' or is_in_a_cycle(cycles, _): continue nghs_c5 = atoms[_].get_ngh() nums_c5, nghs_list_c5 = parse_atom_nghs(nghs_c5, ['H', 'C', 'O']) if nums_c5['H'] == 2 and nums_c5['O'] == 1: o6_index = nghs_list_c5['O'][0] nghs_o6 = atoms[o6_index].get_ngh() nums_o6, nghs_list_o6 = parse_atom_nghs(nghs_o6, ['H', 'C', 'O']) if len(nghs_o6) == 2 and nums_o6['H'] == 1 and nums_o6['C'] == 1: end_carbon_indices.append(_) end_carbon_indices_atom_list[_] = [] for __ in nghs_c5: ___ = __[0] if ___ not in end_carbon_indices_atom_list[_]: end_carbon_indices_atom_list[_].append(___) for __ in nghs_o6: ___ = __[0] if ___ not in end_carbon_indices_atom_list[_]: end_carbon_indices_atom_list[_].append(___) return end_carbon_indices, end_carbon_indices_atom_list
5,342,069
def get_gradient_descent_query(COO=True, parameter=None): """ Generates the query for solving the logistic regression problem :param COO: boolean indicating if the data are in the C00 format :param parameter: dictionary containing number of iterations, features, regularization parameter and step width :return: """ iterations = parameter.get('iterations', 10) features = parameter.get('features', 10) regularization = parameter.get('regularization', 2) step_width = parameter.get('step_width', 0.001) if COO: with open(os.path.join('queries', f'demo_gradient_descent_COO.sql')) as f: query = f.read().format( iterations=iterations, regularization=regularization, step_width=step_width ) else: # create format strings n_features = features weights = ",".join([f"w{i + 1}" for i in range(n_features)]) features = ",".join([f"f{i + 1}" for i in range(n_features)]) floats = ",".join(["0.0::float"] * n_features) features_times_weight = "+".join([f"f{i + 1}*w{i + 1}" for i in range(n_features)]) temp_with_intercept = ",".join([f"t{i + 1}" for i in range(n_features + 1)]) # may have to change the regularization parameter sum_feature_times_val = ",".join([f"{regularization}*SUM(f{i + 1}*val)" for i in range(n_features)]) g_with_intercept = ",".join([f"g{i + 1}" for i in range(n_features + 1)]) # may have to change the step size weight_minus_temp_with_intercept = ",".join( [f"w{i + 1}-t{i + 1}" for i in range(n_features)]) + f",intercept-t{n_features + 1}" weight_times_reg_with_intercept = ",".join([f"w{i + 1}-{step_width}*g{i + 1}" for i in range(n_features)]) + f",intercept-{step_width}*g{n_features + 1}" weight_comma_text = "||".join([f"w{i + 1}::text||','" for i in range(n_features)]) # load the file and replace everything specific for the model with open(os.path.join('queries', f'demo_gradient_descent_db-friendly.sql')) as f: query = f.read().format( iterations=iterations, weights=weights, features=features, features_times_weight=features_times_weight, temp_with_intercept=temp_with_intercept, floats=floats, sum_feature_times_val=sum_feature_times_val, g_with_intercept=g_with_intercept, weight_minus_temp_with_intercept=weight_minus_temp_with_intercept, weight_times_reg_with_intercept=weight_times_reg_with_intercept, weight_comma_text=weight_comma_text, step_width=step_width, regularization=regularization ) return query
5,342,070
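The non-COO ("database-friendly") branch above assembles its SQL query from format strings built with list comprehensions. The following standalone sketch (not part of the original module) simply reproduces that comprehension logic for three features to show what the generated pieces look like.

n_features = 3
weights = ",".join([f"w{i + 1}" for i in range(n_features)])
features_times_weight = "+".join([f"f{i + 1}*w{i + 1}" for i in range(n_features)])
weight_minus_temp = ",".join([f"w{i + 1}-t{i + 1}" for i in range(n_features)]) + f",intercept-t{n_features + 1}"

print(weights)                # w1,w2,w3
print(features_times_weight)  # f1*w1+f2*w2+f3*w3
print(weight_minus_temp)      # w1-t1,w2-t2,w3-t3,intercept-t4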
def keep_point(p, frame): """ p: TrackedPoint instance frame: image (numpy array) """ if not p.in_bounds(): return False if p.coasted_too_long(): return False if p.coasted_too_far(): return False return True
5,342,071
def sqrt(node: NodeWrapper, params: Dict[str, np.ndarray], xmap: Dict[str, XLayer]) -> List[XLayer]: """ONNX Sqrt to XLayer Sqrt conversion function""" logger.info("ONNX Sqrt -> XLayer Sqrt") assert len(node.get_outputs()) == 1 name = node.get_outputs()[0] bottoms = node.get_inputs() iX = xmap[bottoms[0]] # NCHW X = px.ops.sqrt( op_name=px.stringify(name), in_xlayers=[iX], onnx_id=name) return [X]
5,342,072
def get_testcase_desc(suite, testcase_name): """ Return the description of the testcase with the given name of the given testsuite. Remove trailing line returns if applicable, they look nasty in the reports (text and otherwise) """ desc = getattr(suite, testcase_name).__doc__ return strings.format_description(desc.rstrip()) if desc else ""
5,342,073
def slave_freq_one_pc(args): """Wrapper to be able to use Pool""" return args, freq_one_pc(*args)
5,342,074
def base10_to_base26_alph(base10_no): """Convert base-10 integer to base-26 alphabetic system. This function provides a utility to write pdb/psf files such that it can add many more than 9999 atoms and 999 residues. Parameters ---------- base10_no: int The integer to convert to base-26 alphabetic system Returns ------- str The converted base-26 system string See Also -------- mbuild.conversion._to_base: Helper function to perform a base-n conversion """ return _to_base(base10_no, base=26)
5,342,075
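The helper _to_base that base10_to_base26_alph delegates to is not shown here. Below is a minimal, hypothetical sketch of what a base-26 alphabetic conversion can look like; the digit mapping (0 -> 'A') is an assumption and may differ from mbuild's actual _to_base implementation.

import string

def to_base26_alpha(n):
    """Convert a non-negative integer to letters (assumed mapping: 0 -> 'A', 25 -> 'Z', 26 -> 'BA')."""
    digits = string.ascii_uppercase
    if n == 0:
        return digits[0]
    out = ""
    while n > 0:
        n, rem = divmod(n, 26)
        out = digits[rem] + out
    return out

print(to_base26_alpha(0), to_base26_alpha(25), to_base26_alpha(26))  # A Z BA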
def train(model, X, y, name: str): """ train a model on the given training set and optionally save it to disk :param model: the model to train :param X: the sample images, list of numpy arrays (greyscale images) :param y: the target labels, list of strings (kanji) :param name: name of the model used to save it on disk, or None if it is not to be saved :return: the trained model """ # reshape X to 2d X = np.asarray(X) X = X.reshape((X.shape[0], -1)) print("fitting on {} samples".format(len(y))) # train the model print("begin fitting") model.fit(X, y) print("done fitting") # optionally save trained model if name is not None: with open("trained_{}.pkl".format(name), 'wb') as f: pickle.dump(model, f, pickle.HIGHEST_PROTOCOL) return model
5,342,076
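A small usage sketch for train(), assuming the function above is importable. The toy data and the KNN estimator are placeholders for illustration, not the project's real images or model.

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.default_rng(0)
X = [rng.random((8, 8)) for _ in range(20)]   # 20 fake 8x8 "greyscale images"
y = ["kanji_a", "kanji_b"] * 10               # fake labels

model = train(KNeighborsClassifier(n_neighbors=3), X, y, name=None)  # name=None skips pickling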
def create_heterodyne_parser(): """ Create the argument parser. """ description = """\ A script to heterodyne raw gravitational-wave strain data based on the \ expected evolution of the gravitational-wave signal from a set of pulsars.""" parser = BilbyArgParser( prog=sys.argv[0], description=description, ignore_unknown_config_file_keys=False, allow_abbrev=False, ) parser.add("--config", type=str, is_config_file=True, help="Configuration ini file") parser.add( "--version", action="version", version="%(prog)s {version}".format(version=cwinpy.__version__), ) parser.add( "--periodic-restart-time", default=14400, type=int, help=( "Time after which the job will be self-evicted with code 130. " "After this, condor will restart the job. Default is 14400s. " "This is used to decrease the chance of HTCondor hard evictions." ), ) parser.add( "--overwrite", action="store_true", default=False, help=( "Set this flag to make sure any previously generated heterodyned " 'files are overwritten. By default the analysis will "resume" ' "from where it left off (by checking whether output files, as set " 'using "--output" and "--label" arguments, already exist), such ' "as after forced Condor eviction for checkpointing purposes. " "Therefore, this flag is needs to be explicitly given (the " "default is False) if not wanting to use resume and overwrite " "existing files." ), ) dataparser = parser.add_argument_group("Data inputs") dataparser.add( "--starttime", required=True, type=int, help=("The start time of the data to be heterodyned in GPS seconds."), ) dataparser.add( "--endtime", required=True, type=int, help=("The end time of the data to be heterodyned in GPS seconds."), ) dataparser.add( "--stride", default=3600, type=int, help=( "The number of seconds to stride through the data (i.e., this " "number of seconds of data will be read in in one go), Defaults " "to 3600." ), ) dataparser.add( "--detector", required=True, type=str, help=("The name of the detectors for which the data is to be heterodyned."), ) dataparser.add( "--frametype", type=str, help=( 'The "frame type" name of the data to be heterodyned. If this ' "is not given the correct data set will be attempted to be found " "using the channel name." ), ) dataparser.add( "--channel", required=True, type=str, help=( 'The "channel" within the gravitational-wave data file(s) ' '(either a GW frame ".gwf", or HDF5 file) containing the strain ' "data to be heterodyned. The channel name should contain the " "detector name prefix as the first two characters followed by a " 'colon, e.g., "L1:GWOSC-4KHZ_R1_STRAIN"' ), ) dataparser.add( "--host", type=str, help=( "The server name for finding the gravitational-wave data files. " 'Use "datafind.ligo.org:443" for open data available via CVMFS. ' "To use open data available from the GWOSC use " '"https://www.gw-openscience.org".' ), ) dataparser.add( "--outputframecache", type=str, help=( "If given this should give a file path to which a list of " "gravitational-wave data file paths, as found by the code, will " "be written. If not given then the file list will not be output." ), ) dataparser.add( "--appendframecache", action="store_true", default=False, help=( "If writing out the frame cache to a file, set this to True to " "append to the file rather than overwriting. Default is False." ), ) dataparser.add( "--framecache", help=( "Provide a pregenerated cache of gravitational-wave files, either " "as a single file, or a list of files. 
Alternatively, you can " "supply a directory containing the files (which will be " "searched recursively for gwf and then hdf5 files), which should " 'be used in conjunction with the "frametype" argument. If giving ' "a list, this should be in the form of a Python list, surrounded " "by quotation marks, e.g., \"['file1.lcf','file2.lcf']\"." ), ) dataparser.add( "--heterodyneddata", help=( "A string, or dictionary of strings, containing the full file " "path, or directory path, pointing the the location of " "pre-heterodyned data. For a single pulsar a file path can be " "given. For multiple pulsars a directory containing heterodyned " "files (in HDF5 or txt format) can be given provided that within " "it the file names contain the pulsar names as supplied in the " 'file input with "--pulsarfiles". Alternatively, a dictionary ' "can be supplied, keyed on the pulsar name, containing a single " "file path or a directory path as above. If supplying a " "directory, it can contain multiple heterodyned files for a each " "pulsar and all will be used. If giving a dictionary it should be " "surrounded by quotation marks." ), ) segmentparser = parser.add_argument_group("Analysis segment inputs") segmentparser.add( "--segmentlist", help=( "Provide a list of data segment start and end times, as " "list/tuple pairs in the list, or an ASCII text file containing " "the segment start and end times in two columns. If a list, this " "should be in the form of a Python list, surrounded by quotation " 'marks, e.g., "[(900000000,900086400),(900100000,900186400)]".' ), ) segmentparser.add( "--includeflags", help=( "If not providing a segment list then give a string, or list of " "strings, giving the data DQ flags that will be used to generate " "a segment list. Lists should be surrounded by quotation marks, " "e.g., \"['L1:DMT-ANALYSIS_READY:1']\"." ), ) segmentparser.add( "--excludeflags", help=( "A string, or list of strings, giving the data DQ flags to " "when generating a segment list. Lists should be surrounded by " "quotation marks." ), ) segmentparser.add( "--outputsegmentlist", type=str, help=( "If generating a segment list it will be output to the file " "specified by this argument." ), ) segmentparser.add( "--appendsegmentlist", action="store_true", default=False, help=( "If generating a segment list set this to True to append to the " 'file specified by "--outputsegmentlist" rather than ' "overwriting. Default is False." ), ) segmentparser.add("--segmentserver", type=str, help=("The segment database URL.")) pulsarparser = parser.add_argument_group("Pulsar inputs") pulsarparser.add( "--pulsarfiles", action="append", help=( "This specifies the pulsars for which to heterodyne the data. It " "can be either i) a string giving the path to an individual " "pulsar Tempo(2)-style parameter file, ii) a string giving the " "path to a directory containing multiple Tempo(2)-style parameter " "files (the path will be recursively searched for any file with " 'the extension ".par"), iii) a list of paths to individual ' "pulsar parameter files, iv) a dictionary containing paths to " "individual pulsars parameter files keyed to their names. If " "instead, pulsar names are given rather than parameter files it " "will attempt to extract an ephemeris for those pulsars from the " "ATNF pulsar catalogue. If such ephemerides are available then " "they will be used (notification will be given when this is " "these cases). If providing a list or dictionary it should be " "surrounded by quotation marks." 
), ) pulsarparser.add( "--pulsars", action="append", help=( "You can analyse only particular pulsars from those specified by " 'parameter files found through the "--pulsarfiles" argument by ' "passing a string, or list of strings, with particular pulsars " "names to use." ), ) outputparser = parser.add_argument_group("Data output inputs") outputparser.add( "--output", help=( "The base directory into which the heterodyned results will be " "output. To specify explicit directory paths for individual " "pulsars this can be a dictionary of directory paths keyed to the " 'pulsar name (in which case the "--label" argument will be used ' "to set the file name), or full file paths, which will be used in " 'place of the "--label" argument. If not given then the current' "working directory will be used." ), ) outputparser.add( "--label", help=( "The output format for the heterodyned data files. These can be " 'format strings containing the keywords "psr" for the pulsar ' 'name, "det" for the detector, "freqfactor" for the rotation ' 'frequency scale factor used, "gpsstart" for the GPS start ' 'time, and "gpsend" for the GPS end time. The extension should ' 'be given as ".hdf", ".h5", or ".hdf5". E.g., the default ' 'is "heterodyne_{psr}_{det}_{freqfactor}_{gpsstart}-{gpsend}.hdf".' ), ) heterodyneparser = parser.add_argument_group("Heterodyne inputs") heterodyneparser.add( "--filterknee", type=float, help=( "The knee frequency (Hz) of the low-pass filter applied after " "heterodyning the data. This should only be given when " "heterodying raw strain data and not if re-heterodyning processed " "data. Default is 0.5 Hz." ), ) heterodyneparser.add( "--resamplerate", type=float, required=True, help=( "The rate in Hz at which to resample the data (via averaging) " "after application of the heterodyne (and filter if applied)." ), ) heterodyneparser.add( "--freqfactor", type=float, help=( "The factor applied to the pulsars rotational parameters when " "defining the gravitational-wave phase evolution. For example, " "the default value of 2 multiplies the phase evolution by 2 under " "the assumption of a signal emitted from the l=m=2 quadrupole " "mode of a rigidly rotating triaxial neutron star." ), ) heterodyneparser.add( "--crop", type=int, help=( "The number of seconds to crop from the start and end of data " "segments to remove filter impulse effects and issues prior to " "lock-loss. Default is 60 seconds." ), ) heterodyneparser.add( "--includessb", action="store_true", default=False, help=( "Set this flag to include removing the modulation of the signal due to " "Solar System motion and relativistic effects (e.g., Roemer, " "Einstein, and Shapiro delay) during the heterodyne." ), ) heterodyneparser.add( "--includebsb", action="store_true", default=False, help=( "Set this flag to include removing the modulation of the signal " "due to binary system motion and relativistic effects during the " 'heterodyne. To use this "--includessb" must also be set.' ), ) heterodyneparser.add( "--includeglitch", action="store_true", default=False, help=( "Set this flag to include removing the effects of the phase " "evolution of any modelled pulsar glitches during the heterodyne." ), ) heterodyneparser.add( "--includefitwaves", action="store_true", default=False, help=( "Set this to True to include removing the phase evolution of a " "series of sinusoids designed to model low-frequency timing noise " "in the pulsar signal during the heterodyne." 
), ) heterodyneparser.add( "--usetempo2", action="store_true", default=False, help=( "Set this to True to use Tempo2 (via libstempo) to calculate the " "signal phase evolution. For this to be used v2.4.2 or greater of " "libstempo must be installed. When using Tempo2 the " '"--earthephemeris", "--sunephemeris" and "--timeephemeris" ' "arguments do not need to be supplied. This can only be used when " "running the full heterodyne in one stage, but not for " 're-heterodyning previous data, as such all the "--include..." ' "arguments will be assumed to be True." ), ) ephemerisparser = parser.add_argument_group("Solar system ephemeris inputs") ephemerisparser.add( "--earthephemeris", help=( 'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing ' "to the location of a file containing that ephemeris for the " "Earth. The dictionary must be supplied within quotation marks, " "e.g., \"{'DE436':'earth_DE436.txt'}\". If a pulsar requires a " "specific ephemeris that is not provided in this dictionary, then " "the code will automatically attempt to find or download the " "required file if available." ), ) ephemerisparser.add( "--sunephemeris", help=( 'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing ' "to the location of a file containing that ephemeris for the " "Sun. If a pulsar requires a specific ephemeris that is not " "provided in this dictionary, then the code will automatically " "attempt to find or download the required file if available." ), ) ephemerisparser.add( "--timeephemeris", help=( "A dictionary, keyed to time system name, which can be either " '"TCB" or "TDB", pointing to the location of a file containing ' "that ephemeris for that time system. If a pulsar requires a " "specific ephemeris that is not provided in this dictionary, then " "the code will automatically attempt to find or download the " "required file if available." ), ) cfparser = parser.add_argument_group("Configuration inputs") cfparser.add( "--cwinpy-heterodyne-dag-config-file", help=( "A path to the cwinpy_heterodyne_dag configuration file can be " "supplied if this was has been used to setup the heterodyne job." ), ) return parser
5,342,077
def run_phage_boost(genecalls, model_file, verbose): """ Run phage boost :param model_file: The model file that is probably something like model_delta_std_hacked.pickled.silent.gz :param genecalls: The pandas data frame of gene calls :param verbose: more output :return: """ # rolling params period = 20 win_type = 'parzen' min_periods = 1 # region finding params threshold = 0.9 length = 10 gaps = 5 neighbouring = 0 alpha = 0.001 # calculate features from gene calls if verbose: message("Calculating features", "GREEN") df = calculate_features(genecalls) # load model model, feats, feats_, limit = read_model_from_file(model_file) # transform data df = get_predictions.get_deltas(df[feats_]) if verbose: message("Transforming gene predictions to regions", "GREEN") # transform single gene predictions to regions newgenecalls, nphages, res = predict(model, genecalls, df, feats, period, win_type, min_periods, limit, threshold, length, gaps, neighbouring, alpha) return res
5,342,078
def get_wishlist_confirmation_time():
    """Return whether the user can confirm their wishlist or not.

    No request params.
    """
    try:
        confirmation_time = g.user.get_wishlist_confirmation_time()
        # The user may confirm again if they have never confirmed before,
        # or if more than one day has passed since the last confirmation.
        if confirmation_time is None:
            can_confirm = True
        else:
            can_confirm = datetime.now() - confirmation_time > timedelta(days=1)
        return data_response({'can_confirm': can_confirm})
    except AuthorizationError:
        # "Failed to fetch the time of the last confirmation: operation not allowed."
        return error_response('Neuspješno dohvaćanje vremena zadnjeg potvrđivanja: Nedozvoljena mogućnost.', 403)
    # except:
    #     return error_response('Neuspješno dohvaćanje vremena zadnjeg potvrđivanja.')
5,342,079
def logInfo(msg): """ info """ log_msg("INFO", msg)
5,342,080
def get_bprop_npu_clear_float_status(self): """Grad definition for `NPUClearFloatStatus` operation.""" def bprop(x, out, dout): return (zeros_like(x),) return bprop
5,342,081
def path_to_filename(path, with_suffix=True): """Get filename from path. Parameters ========== path : str Path to retrieve file name from e.g. '/path/to/image.png'. with_suffix : bool Whether to include the suffix of file path in file name. Returns ======= str The file name of the path e.g. 'image.png' or 'image' if `with_suffix` is false. """ p = pathlib.Path(path) if with_suffix: return str(p.name) else: return str(p.with_suffix("").name)
5,342,082
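Quick usage sketch, assuming path_to_filename() above is importable.

print(path_to_filename("/path/to/image.png"))                     # image.png
print(path_to_filename("/path/to/image.png", with_suffix=False))  # image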
def encode3(Married):
    """
    Encode marital status as an integer: returns 1 for 'Yes', otherwise 0.
    """
    if Married == 'Yes':
        return 1
    else:
        return 0
5,342,083
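Usage sketch, assuming encode3() is importable and that the data live in a pandas DataFrame with a 'Married' column (the column name is an assumption for illustration).

import pandas as pd

df = pd.DataFrame({"Married": ["Yes", "No", "Yes"]})
df["Married_encoded"] = df["Married"].apply(encode3)
print(df["Married_encoded"].tolist())  # [1, 0, 1]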
def pool_init_price(token0, token1, tick_upper, tick_lower, liquidity_delta, token0_decimals, token1_decimals):
    """
    Infer the pool's initial price from the amounts deposited when liquidity
    was first added to the given tick range.

    :param token0: amount of token0 supplied in the initial mint
    :param token1: amount of token1 supplied in the initial mint
    :param tick_upper: upper tick of the minted position
    :param tick_lower: lower tick of the minted position
    :param liquidity_delta: liquidity added by the mint. Can get from
        etherscan.io using the txn hash (check the logs).
    :param token0_decimals: number of decimals used by token0
    :param token1_decimals: number of decimals used by token1
    :return: the implied initial pool price (the square of the computed
        square-root price)
    """
    if (token0 == 0) or (token1 == 0):
        raise ValueError('Tick range does not span the initial price.')

    sqrt_price_lower = tick_to_sqrt_price(tick_lower)
    sqrt_price_upper = tick_to_sqrt_price(tick_upper)

    # adjust tokens if different decimal conventions are used
    token0_multiplier = 10.0 ** max(token1_decimals - token0_decimals, 0)
    token1_multiplier = 10.0 ** max(token0_decimals - token1_decimals, 0)
    token0 = token0 / token0_multiplier
    token1 = token1 / token1_multiplier

    # formula 6.29
    sqrt_price = token1 / liquidity_delta + sqrt_price_lower
    # formula 6.30
    calc_token0 = liquidity_delta * (1 / sqrt_price - 1 / sqrt_price_upper)

    # verify that the calculated price satisfies formula 6.30
    assert np.isclose(token0, calc_token0, atol=1e-12, rtol=1e-8), (
        f'Calculated token0 {calc_token0:,.4f} does not match input '
        f'token0 {token0:,.4f}.'
    )
    return sqrt_price ** 2
5,342,084
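pool_init_price relies on a tick_to_sqrt_price helper that is not shown here. The sketch below follows the common Uniswap v3 convention sqrt_price = sqrt(1.0001 ** tick); it is an assumption and may differ in scaling from the project's actual helper.

def tick_to_sqrt_price(tick):
    # sqrt(1.0001 ** tick), written as a half-power for numerical convenience
    return 1.0001 ** (tick / 2)

print(tick_to_sqrt_price(0))  # 1.0 at tick 0, i.e. a price of 1 token1 per token0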
def rle_encoding(img, mask_val=1): """ Turns our masks into RLE encoding to easily store them and feed them into models later on https://en.wikipedia.org/wiki/Run-length_encoding Args: img (np.array): Segmentation array mask_val (int): Which value to use to create the RLE Returns: RLE string """ dots = np.where(img.T.flatten() == mask_val)[0] run_lengths = [] prev = -2 for b in dots: if (b > prev + 1): run_lengths.extend((b + 1, 0)) run_lengths[-1] += 1 prev = b return ' '.join([str(x) for x in run_lengths])
5,342,085
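A worked example for rle_encoding(), assuming the function above is importable. Note that the mask is transposed before flattening (img.T.flatten()), so pixel indices run down columns first.

import numpy as np

mask = np.array([[0, 1],
                 [1, 1]])
print(rle_encoding(mask))  # "2 3": one run starting at 1-based pixel 2, length 3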
def alt(*ops, priority=False, default=_Undefined): """ alt(*ops, priority=False, default=Undefined) Returns an awaitable representing the first and only channel operation to finish. Accepts a variable number of operations that either get from or put to a channel and commits only one of them. If no `default` is provided, then only the first op to finish will be committed. If `default` is provided and none of the `ops` finish immediately, then no operation will be committed and `default` will instead be used to complete the returned awaitable. Args: ops: Operations that either get from or put to a channel. A get operation is represented as simply a channel to get from. A put operation is represented as an iterable of the form ``[channel, val]``, where `val` is an item to put onto `channel`. priority: An optional bool. If True, operations will be tried in order. If False, operations will be tried in random order. default: An optional value to use in case no operation finishes immediately. Returns: An awaitable that evaluates to a tuple of the form ``(val, ch)``. If `default` is not provided, then `val` will be what the first successful operation returned and `ch` will be the channel used in that operation. If `default` is provided and none of the operations complete immediately, then the awaitable will evaluate to ``(default, 'default')``. Raises: ValueError: If `ops` is empty or contains both a get and put operation to the same channel. RuntimeError: If the calling thread has no running event loop. See Also: :func:`b_alt` """ flag = create_flag() future = FlagFuture(flag) ret = _alts(flag, future_deliver_fn(future), ops, priority, default) if ret is not None: asyncio.Future.set_result(future, ret) return future
5,342,086
def make_class_dictable( cls, exclude=constants.default_exclude, exclude_underscore=constants.default_exclude_underscore, fromdict_allow_pk=constants.default_fromdict_allow_pk, include=None, asdict_include=None, fromdict_include=None, ): """Make a class dictable Useful for when the Base class is already defined, for example when using Flask-SQLAlchemy. Warning: This method will overwrite existing attributes if they exists. :param exclude: Will be set as dictalchemy_exclude on the class :param exclude_underscore: Will be set as dictalchemy_exclude_underscore \ on the class :param fromdict_allow_pk: Will be set as dictalchemy_fromdict_allow_pk\ on the class :param include: Will be set as dictalchemy_include on the class. :param asdict_include: Will be set as `dictalchemy_asdict_include` on the \ class. If not None it will override `dictalchemy_include`. :param fromdict_include: Will be set as `dictalchemy_fromdict_include` on \ the class. If not None it will override `dictalchemy_include`. :returns: The class """ setattr(cls, "dictalchemy_exclude", exclude) setattr(cls, "dictalchemy_exclude_underscore", exclude_underscore) setattr(cls, "dictalchemy_fromdict_allow_pk", fromdict_allow_pk) setattr(cls, "asdict", asdict) setattr(cls, "fromdict", fromdict) setattr(cls, "__iter__", iter) setattr(cls, "dictalchemy_include", include) setattr(cls, "dictalchemy_asdict_include", asdict_include) setattr(cls, "dictalchemy_fromdict_include", fromdict_include) return cls
5,342,087
def async_request_config(
    hass,
    name,
    callback=None,
    description=None,
    description_image=None,
    submit_caption=None,
    fields=None,
    link_name=None,
    link_url=None,
    entity_picture=None,
):
    """Create a new request for configuration.

    Will return an ID to be used for subsequent calls.
    """
    if link_name is not None and link_url is not None:
        description += f"\n\n[{link_name}]({link_url})"

    if description_image is not None:
        description += f"\n\n![Description image]({description_image})"

    if (instance := hass.data.get(_KEY_INSTANCE)) is None:
        instance = hass.data[_KEY_INSTANCE] = Configurator(hass)

    request_id = instance.async_request_config(
        name, callback, description, submit_caption, fields, entity_picture
    )

    if DATA_REQUESTS not in hass.data:
        hass.data[DATA_REQUESTS] = {}

    hass.data[DATA_REQUESTS][request_id] = instance

    return request_id
5,342,088
def get_latest_active_table_version( namespace: str, table_name: str, *args, **kwargs) -> Optional[TableVersion]: """ Gets table version metadata for the latest active version of the specified table. Returns None if no active table version exists for the given table. """ raise NotImplementedError("get_latest_active_table_version not implemented")
5,342,089
def get_blueprint_docs(blueprints, blueprint): """Returns doc string for blueprint.""" doc_string = blueprints[blueprint].__doc__ return doc_string
5,342,090
def check_capability( instance: Any, # pylint: disable=W0613 attribute: attr.Attribute, value: Union[str, List[str]], ) -> None: """Validator that ensures capability has a valid input. Parameters ---------- instance : Any A class object. attribute : attr.Attribute The attribute being validated. value : Union[str, List[str]] The servicing equipment's capability. Should be one of the following: - CTV: crew transfer vehicle/vessel - SCN: small crane - LCN: large crane - CAB: cabling equipment/vessel - RMT: remote reset - DRN: drone - DSV: diving support vessel Raises ------ ValueError Raised if the input is not of the valid inputs. """ valid = set(("CTV", "SCN", "LCN", "CAB", "RMT", "DRN", "DSV")) values = set(convert_to_list(value, str.upper)) invalid = values - valid if invalid: raise ValueError(f"Input {attribute.name} must be any combination of {valid}.")
5,342,091
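Usage sketch: attaching check_capability as an attrs validator. It assumes check_capability (and its convert_to_list helper) are importable from the project; the ServiceEquipment class itself is hypothetical.

import attr

@attr.s(auto_attribs=True)
class ServiceEquipment:
    capability: str = attr.ib(validator=check_capability)

ServiceEquipment("ctv")    # ok: upper-cased to "CTV", a valid capability
# ServiceEquipment("XYZ")  # would raise ValueError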
def getColorPalatte(image, num, show_chart=False):
    """
    Returns the most prevalent colors of an image

    arguments:
        image - image to sample colors from
        num - number of colors to sample
        show_chart - show a visual representation of the colors selected
    """
    modified_image = np.array(image)
    modified_image = cv2.resize(
        modified_image, (600, 400), interpolation=cv2.INTER_AREA
    )
    modified_image = modified_image.reshape(-1, 3)

    clf = KMeans(n_clusters=num)
    labels = clf.fit_predict(modified_image)

    counts = Counter(labels)
    # sort to ensure correct color percentage
    counts = dict(sorted(counts.items()))

    center_colors = clf.cluster_centers_
    center_colors = np.rint(center_colors)
    center_colors = center_colors.astype(int)
    center_colors = [tuple(color) for color in center_colors]

    # We get ordered colors by iterating through the keys
    ordered_colors = [center_colors[i] for i in counts.keys()]
    hex_colors = [RGB2HEX(ordered_colors[i]) for i in counts.keys()]
    rgb_colors = [ordered_colors[i] for i in counts.keys()]

    if show_chart:
        plt.figure(figsize=(10, 6))
        plt.subplot(1, 2, 1)
        plt.imshow(image)
        plt.subplot(1, 2, 2)
        plt.pie(counts.values(), labels=hex_colors, colors=hex_colors)
        plt.show()

    return rgb_colors
5,342,092
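getColorPalatte assumes an RGB2HEX helper that is not shown above. Here is a minimal sketch of what such a helper typically looks like; it is an assumption, not the project's actual implementation.

def RGB2HEX(color):
    r, g, b = (int(c) for c in color)
    return "#{:02x}{:02x}{:02x}".format(r, g, b)

print(RGB2HEX((255, 128, 0)))  # #ff8000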
def run_classifier(data, labels, shuffle=False, nfolds=8, scale=True, clf=None, verbose=False):
    """
    Run a classifier on a single dataset using stratified k-fold
    cross-validation and return the cross-validated accuracy.
    """
    features = data
    if scale:
        features = sklearn.preprocessing.scale(features)
    if shuffle:
        # Shuffling the labels breaks the feature/label pairing, giving a
        # chance-level baseline for comparison.
        numpy.random.shuffle(labels)
    if not clf:
        clf = sklearn.svm.SVC(C=C)
    # Use the requested number of folds rather than a hard-coded value.
    skf = sklearn.model_selection.StratifiedKFold(nfolds, shuffle=True)
    pred = numpy.zeros(labels.shape[0])
    for train, test in skf.split(features, labels):
        clf.fit(features[train, :], labels[train])
        pred[test] = clf.predict(features[test, :])
    if verbose and hasattr(clf, 'best_params_'):
        # best_params_ only exists for hyperparameter-search estimators
        # such as GridSearchCV.
        print(clf.best_params_)
    acc = sklearn.metrics.accuracy_score(labels, pred)
    return acc
5,342,093
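Usage sketch for run_classifier(), assuming the function above is importable. An explicit clf is passed so the module-level C constant is not needed; the synthetic dataset is purely illustrative.

import sklearn.svm
from sklearn.datasets import make_classification

data, labels = make_classification(n_samples=100, n_features=10, random_state=0)
acc = run_classifier(data, labels, clf=sklearn.svm.SVC(C=1.0))
print(f"cross-validated accuracy: {acc:.3f}")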
def merge_sort(linked_list): """ Sorts a linked list in ascending order - Recursively divide the linked list into sublist containing a single node - Repeatedly merge the sublist to produce sorted sublist until one remains Returns a sorted linked list Takes O(kn log n) time """ if linked_list.size() == 1: return linked_list elif linked_list.head is None: return linked_list left_half, right_half = split(linked_list) left = merge_sort(left_half) right = merge_sort(right_half) return merge(left, right)
5,342,094
def is_binary(file_path): """ Returns True if the file is binary """ with open(file_path, 'rb') as fp: data = fp.read(1024) if not data: return False if b'\0' in data: return True return False
5,342,095
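A quick check of the NUL-byte heuristic used by is_binary(), assuming the function above is importable.

import tempfile

with tempfile.NamedTemporaryFile(delete=False, suffix=".txt") as f:
    f.write(b"plain text\n")
    text_path = f.name
with tempfile.NamedTemporaryFile(delete=False, suffix=".bin") as f:
    f.write(b"\x00\x01\x02")
    bin_path = f.name

print(is_binary(text_path), is_binary(bin_path))  # False True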
def convert_host_names_to_ids(session, instanceList): """Look up ID of each instance on Amazon. Returns a list of IDs.""" idList = [] for i in instanceList: instId = aws.instanceid_lookup(session, i) if instId is not None: idList.append(instId) return idList
5,342,096
def plotants(vis, figfile): """Plot the physical layout of the antennas described in the MS. vis (str) Path to the input dataset figfile (str) Path to the output image file. The output image format will be inferred from the extension of *figfile*. Example:: from pwkit.environments.casa import tasks tasks.plotants('dataset.ms', 'antennas.png') """ from .scripting import CasapyScript script = os.path.join(os.path.dirname(__file__), 'cscript_plotants.py') with CasapyScript(script, vis=vis, figfile=figfile) as cs: pass
5,342,097
def handle_over_max_file_size(error):
    """Handle werkzeug.exceptions.RequestEntityTooLarge.

    Args:
        error: the RequestEntityTooLarge exception raised by Flask/werkzeug.

    Returns:
        A short message indicating that the uploaded file exceeds the size limit.
    """
    # Convert the exception to a string before concatenating; adding the
    # exception object directly would raise a TypeError.
    print("werkzeug.exceptions.RequestEntityTooLarge: " + str(error))
    return 'result : file size exceeds the allowed limit.'
5,342,098
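A sketch of how such a handler is typically registered with Flask, assuming handle_over_max_file_size() above is importable; the app setup here is illustrative, not the project's actual configuration.

from flask import Flask
from werkzeug.exceptions import RequestEntityTooLarge

app = Flask(__name__)
app.config["MAX_CONTENT_LENGTH"] = 16 * 1024 * 1024  # 16 MB upload limit
app.register_error_handler(RequestEntityTooLarge, handle_over_max_file_size)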
def main(): """main""" parser = argparse.ArgumentParser() parser.add_argument("--cuda", action="store_true") parser.add_argument("--num_cuda", type=int, default=1) parser.add_argument("--modality_name", nargs="+", default=["mmt"]) parser.add_argument("--data_save", type=str, default="data", help="save path for features and meta data") parser.add_argument("--data_video", type=str, default="data", help="path to dataset video") parser.add_argument("--group_k", type=int, default=5, help="Number of segments per video") parser.add_argument("--output_file", type=str, default="lsmdc_v1.pth") args = parser.parse_args() params = {} params['word_count_threshold'] = 5 params['input_path'] = 'data/lsmdc/annot/' params['group_by'] = args.group_k params['max_length'] = 30 params['annot'] = ['LSMDC16_annos_training.csv']#, 'LSMDC16_annos_test.csv'] params['splits'] = ['train'] #, 'test'] # params['annot'] = ['LSMDC16_challenge_1000_publictect.csv']#, 'LSMDC16_annos_test.csv'] # params['splits'] = ['test1k'] #, 'test'] videos, groups, movie_ids, vocab = build_vocab(params) itow = {i + 2: w for i, w in enumerate(vocab)} wtoi = {w: i + 2 for i, w in enumerate(vocab)} # inverse table wtoi['<eos>'] = 0 itow[0] = '<eos>' wtoi['<sos>'] = 1 itow[1] = '<sos>' labels, lld = build_label(params, videos, wtoi) #================================================================================== # # load model # net = S3D(token_to_word_path="data/howto100m_mil/s3d_dict.npy", # num_classes=512) # net.load_state_dict(torch.load('data/howto100m_mil/s3d_howto100m.pth')) # net = net.eval() # if args.cuda: # net = net.cuda() print(f"{len(groups)} vids {len(videos)} segs") # make frame features # create directories to store features data_dir = os.path.join(args.data_save, "group{}".format(args.group_k), "video_features") language_dir = os.path.join(args.data_save, "group{}".format(args.group_k), "language_features") meta_dir = os.path.join(args.data_save, "meta") os.makedirs(data_dir, exist_ok=True) os.makedirs(language_dir, exist_ok=True) os.makedirs(meta_dir, exist_ok=True) modality_name = '_'.join(args.modality_name).replace(',', '') # print(modality_name) vid_h5_file = Path(os.path.join(data_dir, modality_name) + '.h5') print(vid_h5_file) vid_h5 = h5py.File(vid_h5_file, "a") meta_data = OrderedDict() pbar = tqdm(total=len(groups)) rnd_idx = np.random.randint(20000, size=(1, 100)) groups_rnd = [groups[i] for i in rnd_idx[0]] i = 0 for vid_gp in groups_rnd: movie_name = vid_gp["movie"] clip_ids = vid_gp["videos"] vid_gp_id = vid_gp['id'] results_collector = [] start_frame = 0 stop_frame = 0 clip_counter = 0 meta_video = OrderedDict() video_segments = [] if str(vid_gp_id) not in vid_h5: for clip_id in clip_ids: clip_name = videos[clip_id]['clip'] clip_caption = videos[clip_id]['narration'] split = vid_gp["split"] data_dir = "/mnt/efs/fs1/workspace/experiments/data_mmt_eccv20/mmt/data/LSMDC/vid_feat_files/mult_h5/" expert_names = ["scene", "rgb", "ocr","s3d","vggish", "audio", "flow"] #print(clip_name, movie_name) raw_captions, raw_captions_t, features, features_t, features_avgpool, features_maxpool = get_sample_data(data_dir, clip_name, expert_names) #print(features) if features["scene"].shape[1] > 2: scene_feat1 = features["scene"][0,0:1024].reshape(1,1024) scene_feat2 = features["scene"][0,1024:2048].reshape(1,1024) else: print("missing scene", clip_name) scene_feat1 = np.zeros((1,1024)) scene_feat2 = np.zeros((1,1024)) if features["rgb"].shape[1] > 2: rgb_feat1 = features["rgb"][0,0:1024].reshape(1,1024) rgb_feat2 = 
features["rgb"][0,1024:2048].reshape(1,1024) else: print("missing RGB", clip_name) rgb_feat1 = np.zeros((1,1024)) rgb_feat2 = np.zeros((1,1024)) if features["s3d"].shape[1] > 2: s3d_feat1 = features["s3d"][0,:].reshape(1,1024) s3d_feat2 = features["s3d"][1,:].reshape(1,1024) else: print("missing S3D", clip_name) s3d_feat1 = np.zeros((1,1024)) s3d_feat2 = np.zeros((1,1024)) if features["flow"].shape[1] > 2: flow_feat = features["flow"][0,:].reshape(1,1024) else: print("missing Flow", clip_name) flow_feat = np.zeros((1,1024)) if features["ocr"].shape[1] > 300: ocr_feat = features["ocr"].reshape(1,-1) else: print("missing OCR", clip_name) ocr_feat = np.zeros((1,600)) if features["vggish"].shape[1] > 2: vggish_feat = features["vggish"].reshape(1,-1) else: print("missing vggish",clip_name) vggish_feat = np.zeros((1,384)) if features["audio"].shape[1] > 2: audio_feat = features["audio"].reshape(1,-1) else: print("missing audio", clip_name) audio_feat = np.zeros((1,40)) # print(features["vggish"].shape, vggish_feat.shape, "===>") # feat_all = torch.cat(features_all, dim=0) ocr_vggish_feat = np.zeros((1,1024)) ocr_vggish_feat[0, 0:600] = ocr_feat[0, 0:600] ocr_vggish_feat[0, 600:984] = vggish_feat[0, 0:384] ocr_vggish_feat[0, 984:1024] = audio_feat[0,0:40] expert_feats = [scene_feat1, scene_feat2, rgb_feat1, rgb_feat2, s3d_feat1, s3d_feat2, ocr_vggish_feat, flow_feat] results_collector.append(expert_feats) # pdb.set_trace() video_path = os.path.join(args.data_video, movie_name, clip_name) + '.mp4' # Segment level information #tmp_feat = torch.cat(results_collector, dim=0) start_frame = stop_frame stop_frame = len(results_collector)*8 num_features = stop_frame - start_frame # print(start_frame, stop_frame, num_features, len(results_collector)) segment_info = { "narration": clip_caption, "start_frame": start_frame, "stop_frame": stop_frame, "segment_name": clip_name, "num_frames": num_features } video_segments.append(segment_info) clip_counter += 1 results_feat = np.concatenate(results_collector, axis=0).reshape(-1, 1024) # pdb.set_trace() len_results = results_feat.shape[0] # Video level information meta_video = { "num_frames": len_results, "data_id": str(vid_gp_id), "split": split, "segments": video_segments } # write to h5 vid_h5[str(vid_gp_id)] = results_feat feat_len = len_results # del results # del frames_collector del results_collector del results_feat # del video_decord else: feat_len = int(vid_h5[str(vid_gp_id)].shape[0]) # write new meta meta_data[str(vid_gp_id)] = meta_video pbar.update() vid_h5.close() pbar.close() # write new meta meta_data_path = Path( os.path.join( args.data_save, "meta", "meta_group{}_{}.json".format(args.group_k, modality_name))) with open(meta_data_path, "wt") as fh: write_json(meta_data, fh) print("=====Done!=====")
5,342,099