content
stringlengths
22
815k
id
int64
0
4.91M
def test_coinbase_query_balances(function_scope_coinbase): """Test that coinbase balance query works fine for the happy path""" coinbase = function_scope_coinbase def mock_coinbase_accounts(url, timeout): # pylint: disable=unused-argument response = MockResponse( 200, """ { "pagination": { "ending_before": null, "starting_after": null, "limit": 25, "order": "desc", "previous_uri": null, "next_uri": null }, "data": [ { "id": "58542935-67b5-56e1-a3f9-42686e07fa40", "name": "My Vault", "primary": false, "type": "vault", "currency": "BTC", "balance": { "amount": "4.00000000", "currency": "BTC" }, "created_at": "2015-01-31T20:49:02Z", "updated_at": "2015-01-31T20:49:02Z", "resource": "account", "resource_path": "/v2/accounts/58542935-67b5-56e1-a3f9-42686e07fa40", "ready": true }, { "id": "2bbf394c-193b-5b2a-9155-3b4732659ede", "name": "My Wallet", "primary": true, "type": "wallet", "currency": "ETH", "balance": { "amount": "39.59000000", "currency": "ETH" }, "created_at": "2015-01-31T20:49:02Z", "updated_at": "2015-01-31T20:49:02Z", "resource": "account", "resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede" }, { "id": "68542935-67b5-56e1-a3f9-42686e07fa40", "name": "Another Wallet", "primary": false, "type": "vault", "currency": "BTC", "balance": { "amount": "1.230000000", "currency": "BTC" }, "created_at": "2015-01-31T20:49:02Z", "updated_at": "2015-01-31T20:49:02Z", "resource": "account", "resource_path": "/v2/accounts/68542935-67b5-56e1-a3f9-42686e07fa40", "ready": true } ] } """, ) return response with patch.object(coinbase.session, 'get', side_effect=mock_coinbase_accounts): balances, msg = coinbase.query_balances() assert msg == '' assert len(balances) == 2 assert balances[A_BTC].amount == FVal('5.23') assert balances[A_BTC].usd_value == FVal('7.8450000000') assert balances[A_ETH].amount == FVal('39.59') assert balances[A_ETH].usd_value == FVal('59.385000000') warnings = coinbase.msg_aggregator.consume_warnings() errors = 
coinbase.msg_aggregator.consume_errors() assert len(warnings) == 0 assert len(errors) == 0
5,331,700
def netoversigt(projektnavn: str, **kwargs) -> None: """Opbyg netoversigt""" er_projekt_okay(projektnavn) fire.cli.print("Så kører vi") resultater = netanalyse(projektnavn) skriv_ark(projektnavn, resultater, "-netoversigt") singulære_punkter = tuple(sorted(resultater["Singulære"]["Punkt"])) fire.cli.print( f"Fandt {len(singulære_punkter)} singulære punkter: {singulære_punkter}" )
5,331,701
def print_text_samples(dataset: Dataset, encoder: Encoder, indices, export_file, att_heads=None, weights=None, title=''): """Print text samples of dataset specified by indices to export_file text file.""" export_txt = export_file + '.txt' txt_file = open(export_txt, 'a') if title: txt_file.write(f'{title}\n\n') texts = [] texts_weights = [] i = 1 for idx in indices: tokens = dataset[idx]['text'] text = encoder.decode(tokens) if att_heads is not None: att_head = att_heads[i-1] txt_file.write(f'{i:02}. (h{att_head:02})\n {text}\n\n') else: txt_file.write(f'{i:02}.\n {text}\n\n') if weights is not None: texts_weights.append(weights[i-1][:len(tokens)]) texts.append(text) i += 1 txt_file.close() if weights is not None: export_html = export_file + '.html' createHTML(texts, att_heads, texts_weights, export_html) return
5,331,702
def test_dataset_traverse_dirs(test_output_dirs: OutputFolderForTests, center_crop_size: Optional[TupleInt3]) -> None: """ Test dataset loading when the dataset file only contains file name stems, not full paths. """ # Copy the existing test dataset to a new folder, two levels deep. Later will initialize the # dataset with only the root folder given, to check if the files are still found. source_folder = str(full_ml_test_data_path() / "classification_data") target_folder = str(Path(test_output_dirs.make_sub_dir("foo")) / "bar") shutil.copytree(source_folder, target_folder) # The dataset should only contain the file name stem, without extension. csv_string = StringIO("""subject,channel,path,value,scalar1 S1,image,4be9beed-5861-fdd2-72c2-8dd89aadc1ef S1,label,,True,1.0 S2,image,6ceacaf8-abd2-ffec-2ade-d52afd6dd1be S2,label,,True,2.0 S3,image,61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4 S3,label,,False,3.0 S4,image,61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4 S4,label,,False,3.0 """) df = pd.read_csv(csv_string, sep=",", dtype=str) args = ScalarModelBase(image_channels=["image"], image_file_column="path", label_channels=["label"], label_value_column="value", non_image_feature_channels={}, numerical_columns=[], traverse_dirs_when_loading=True, center_crop_size=center_crop_size, local_dataset=test_output_dirs.root_dir) dataset = ScalarDataset(args, data_frame=df) assert len(dataset) == 4 for i in range(4): item = dataset[i] assert isinstance(item, dict) images = item["images"] assert images is not None assert torch.is_tensor(images) expected_image_size = center_crop_size or (4, 5, 7) assert images.shape == (1,) + expected_image_size
5,331,703
def do_requests_tags(self, subcmd, opts, project): """${cmd_name}: Lists requests with hashtags This command will list requests for a given project together with the list of hashtags in the request diff, so that you can use this information to group them. ${cmd_usage} ${cmd_option_list} """ api = self.get_api_url() requests = get_request_list(api, project = project, req_state =('new', 'review')) for request in requests: description = request.to_xml().findall("description")[0].text req_id = request.to_xml().get("id") req_diff = request_diff("https://api.suse.de", req_id) to_print = "id " + req_id for pattern in patterns: match = re.findall(pattern, req_diff) if len(match) > 0: to_print += str(match) print to_print
5,331,704
def process_phase_boundary(fname): """ Processes the phase boundary file, computed mean and standard deviations """ from scipy.interpolate import interp1d singlets = [] chem_pot = [] temperatures = [] with h5.File(fname, 'r') as hfile: for name in hfile.keys(): grp = hfile[name] singlets.append(np.array(grp["singlets"])) chem_pot.append(np.array(grp["chem_pot"])) temperatures.append(np.array(grp["temperatures"])) max_temp = 0.0 min_temp = 10000000.0 for temp_array in temperatures: if np.max(temp_array) > max_temp: max_temp = np.max(temp_array) if np.min(temp_array) < min_temp: min_temp = np.min(temp_array) temp_linspace = np.linspace(min_temp, max_temp, 200) result = {} result["chem_pot"] = [] result["std_chem_pot"] = [] result["singlets"] = [] result["std_singlets"] = [] result["num_visits"] = [] result["temperature"] = temp_linspace for sing_dset in singlets: if np.any(sing_dset.shape != singlets[0].shape): msg = "Invalid file! Looks like it contains phase boundary\n" msg += " data for different systems" raise ValueError(msg) num_chem_pots = chem_pot[0].shape[1] for i in range(num_chem_pots): mu_averager = DatasetAverager(temp_linspace) for temps, mu in zip(temperatures, chem_pot): mu_averager.add_dataset(temps, mu[:,i]) mu_res = mu_averager.get() result["chem_pot"].append(mu_res["y_values"]) result["std_chem_pot"].append(mu_res["std_y"]) result["num_visits"].append(mu_res["num_visits"]) num_singlets = singlets[0].shape[1] for i in range(num_chem_pots): for temp, singl in zip(temperatures, singlets): singlet_averager = DatasetAverager(temp_linspace) singlet = [] std_singlet = [] for j in range(num_singlets): singlet_averager.add_dataset(temps, singl[:,j,i]) singl_res = singlet_averager.get() singlet.append(singl_res["y_values"]) std_singlet.append(singl_res["std_y"]) result["singlets"].append(singlet) result["std_singlets"].append(std_singlet) return result
5,331,705
def main(): """Entry point for the check_model script. Returns ------- :class:`int` An integer suitable for passing to :func:`sys.exit`. """ from sys import argv from argparse import ArgumentParser desc = """Check actual files against the data model for validity. """ parser = ArgumentParser(description=desc, prog=os.path.basename(argv[0])) parser.add_argument('-d', '--datamodel-dir', dest='desidatamodel', metavar='DIR', help='Override the value of DESIDATAMODEL.') parser.add_argument('-F', '--compare-files', dest='files', action='store_true', help='Compare an individual data model to an individual file.') parser.add_argument('-W', '--warning-is-error', dest='error', action='store_true', help='Data model warnings raise exceptions.') parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Set log level to DEBUG.') parser.add_argument('section', metavar='DIR or FILE', help='Section of the data model or individual model file.') parser.add_argument('directory', metavar='DIR or FILE', help='Check files in this top-level directory, or one individual file.') options = parser.parse_args() if options.verbose: log.setLevel(DEBUG) if 'DESIDATAMODEL' in os.environ: data_model_root = os.environ['DESIDATAMODEL'] else: if options.desidatamodel is not None: data_model_root = options.desidatamodel else: log.critical(("DESIDATAMODEL is not defined. 
" + "Cannot find data model files!")) return 1 log.debug("DESIDATAMODEL=%s", data_model_root) if options.files: filename = os.path.join(data_model_root, 'doc', options.section) section = os.path.join(data_model_root, 'doc', options.section.split('/')[0]) log.info("Loading individual data model: %s.", filename) files = [DataModel(filename, section)] log.info("Skipping regular expression processing.") # files[0].get_regexp(options.directory, error=options.error) log.info("Setting prototype file for %s to %s.", filename, options.directory) files[0].prototype = options.directory else: section = os.path.join(data_model_root, 'doc', options.section) log.info("Loading data model file in %s.", section) files = scan_model(section) log.info("Searching for data files in %s.", options.directory) files_to_regexp(options.directory, files, error=options.error) log.info("Identifying prototype files in %s.", options.directory) collect_files(options.directory, files) validate_prototypes(files, error=options.error) return 0
5,331,706
def blackman_window(shape, normalization=1): """ Create a 3d Blackman window based on shape. :param shape: tuple, shape of the 3d window :param normalization: value of the integral of the backman window :return: the 3d Blackman window """ nbz, nby, nbx = shape array_z = np.blackman(nbz) array_y = np.blackman(nby) array_x = np.blackman(nbx) blackman2 = np.ones((nbz, nby)) blackman3 = np.ones((nbz, nby, nbx)) for idz in range(nbz): blackman2[idz, :] = array_z[idz] * array_y for idy in range(nby): blackman3[idz, idy] = blackman2[idz, idy] * array_x blackman3 = blackman3 / blackman3.sum() * normalization return blackman3
5,331,707
def list_workflows(): """List all workflows.""" count = 0 with service() as api: doc = api.workflows().list_workflows() for wf in doc[labels.WORKFLOW_LIST]: if count != 0: click.echo() count += 1 title = 'Workflow {}'.format(count) click.echo(title) click.echo('-' * len(title)) click.echo() click.echo('ID : {}'.format(wf[labels.WORKFLOW_ID])) click.echo('Name : {}'.format(wf[labels.WORKFLOW_NAME])) click.echo('Description : {}'.format(wf.get(labels.WORKFLOW_DESCRIPTION))) click.echo('Instructions: {}'.format(wf.get(labels.WORKFLOW_INSTRUCTIONS)))
5,331,708
def asset_movements_from_dictlist(given_data, start_ts, end_ts): """ Gets a list of dict asset movements, most probably read from the json files and a time period. Returns it as a list of the AssetMovement tuples that are inside the time period """ returned_movements = list() for movement in given_data: if movement['timestamp'] < start_ts: continue if movement['timestamp'] > end_ts: break returned_movements.append(AssetMovement( exchange=movement['exchange'], category=movement['category'], timestamp=movement['timestamp'], asset=movement['asset'], amount=FVal(movement['amount']), fee=FVal(movement['fee']), )) return returned_movements
5,331,709
def update_work(work_id): """ Route permettant de modifier les données d'une collection :param work_id: ID de l'oeuvre récupérée depuis la page oeuvre :return: redirection ou template update-work.html :rtype: template """ if request.method == "GET": updateWork = Work.query.get(work_id) return render_template("pages/update-work.html", updateWork=updateWork) else: status, data = Work.update_work( work_id=work_id, title=request.form.get("title", None), author=request.form.get("author", None), date=request.form.get("date", None), medium=request.form.get("medium", None), dimensions=request.form.get("dimensions", None), image=request.form.get("image", None) ) if status is True: flash("Modification réussie !", "success") return redirect("/collections") else: flash("Les erreurs suivantes ont été rencontrées : " + ", ".join(data), "danger") updateWork = Work.query.get(work_id) return render_template("pages/update-work.html", nom="CollectArt", updateWork=updateWork)
5,331,710
def login_view(request): """Login user view""" if request.method == 'POST': email = request.POST.get('email') password = request.POST.get('password') user = authenticate(request, username=email, password=password) if user is not None: login(request, user) return redirect('/') else: messages.info(request, 'Username Or Password is incorrect.') context = {} return render(request, 'pages/login.html', context)
5,331,711
def set_seed(seed: int) -> RandomState: """ Method to set seed across runs to ensure reproducibility. It fixes seed for single-gpu machines. Args: seed (int): Seed to fix reproducibility. It should different for each run Returns: RandomState: fixed random state to initialize dataset iterators """ torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # set to false for reproducibility, True to boost performance torch.manual_seed(seed) np.random.seed(seed) torch.cuda.manual_seed(seed) random.seed(seed) random_state = random.getstate() os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" return random_state
5,331,712
def test_double_q_learning(episodes=5): """ Performs Double Q Learning (Off-policy TD0 Control) on multiple environments (separately). """ method_name = 'Double Q Learning' # Taxi: tx_env = Taxi() tx_model = TD0ControlModel(tx_env, episodes, alpha=0.4) # episodes=10000 tx_q1_table, tx_q2_table, tx_scores, _ = tx_model.perform_double_q_learning() plot_running_average(tx_env.name, method_name, tx_scores) tx_q1_scores, _ = run_q_table(tx_env, tx_q1_table, episodes) tx_q2_scores, _ = run_q_table(tx_env, tx_q2_table, episodes) scores_list = [tx_q1_scores, tx_q2_scores] labels = ['Q1', 'Q2'] plot_running_average_comparison(tx_env.name + ' - ' + method_name, scores_list, labels) # Mountain Car: mc_env = MountainCar() mc_model = TD0ControlModel(mc_env, episodes) mc_q1_table, mc_q2_table, mc_scores, _ = mc_model.perform_double_q_learning() plot_running_average(mc_env.name, method_name, mc_scores) mc_q1_scores, _ = run_q_table(mc_env, mc_q1_table, episodes) mc_q2_scores, _ = run_q_table(mc_env, mc_q2_table, episodes) scores_list = [mc_q1_scores, mc_q2_scores] labels = ['Q1', 'Q2'] plot_running_average_comparison(mc_env.name + ' - ' + method_name, scores_list, labels) # Cart Pole: cp_env = CartPole() cp_model = TD0ControlModel(cp_env, episodes) cp_q1_table, cp_q2_table, cp_scores, _ = cp_model.perform_double_q_learning() plot_running_average(cp_env.name, method_name, cp_scores) cp_q1_scores, _ = run_q_table(cp_env, cp_q1_table, episodes) cp_q2_scores, _ = run_q_table(cp_env, cp_q2_table, episodes) scores_list = [cp_q1_scores, cp_q2_scores] labels = ['Q1', 'Q2'] plot_running_average_comparison(cp_env.name + ' - ' + method_name, scores_list, labels)
5,331,713
async def help_test_setup_manual_entity_from_yaml(hass, platform, config): """Help to test setup from yaml through configuration entry.""" config_structure = {mqtt.DOMAIN: {platform: config}} await async_setup_component(hass, mqtt.DOMAIN, config_structure) # Mock config entry entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker"}) entry.add_to_hass(hass) with patch("paho.mqtt.client.Client") as mock_client: mock_client().connect = lambda *args: 0 assert await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done()
5,331,714
def mse(im1, im2): """Compute the Mean Squared Error. Compute the Mean Squared Error between the two images, i.e. sum of the squared difference. Args: im1 (ndarray): First array. im2 (ndarray): Second array. Returns: float: Mean Squared Error. """ im1 = np.asarray(im1) im2 = np.asarray(im2) if im1.shape != im2.shape: raise ValueError("Shape mismatch: im1 and im2 must have the same shape.") err = np.sum((im1.astype("float") - im2.astype("float")) ** 2) err /= float(im1.shape[0] * im1.shape[1]) return err
5,331,715
def bert_text_preparation(text, tokenizer): """Preparing the input for BERT Takes a string argument and performs pre-processing like adding special tokens, tokenization, tokens to ids, and tokens to segment ids. All tokens are mapped to seg- ment id = 1. Args: text (str): Text to be converted tokenizer (obj): Tokenizer object to convert text into BERT-re- adable tokens and ids Returns: list: List of BERT-readable tokens obj: Torch tensor with token ids obj: Torch tensor segment ids """ marked_text = "[CLS] " + text + " [SEP]" tokenized_text = tokenizer.tokenize(marked_text) indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text) segments_ids = [1]*len(indexed_tokens) # Convert inputs to PyTorch tensors tokens_tensor = torch.tensor([indexed_tokens]) segments_tensors = torch.tensor([segments_ids]) return tokenized_text, tokens_tensor, segments_tensors
5,331,716
def test_elem_q021_elem_q021_v(mode, save_output, output_format): """ TEST :3.3.2 XML Representation of Element Declaration Schema Components : Document with default=Hello andDocument contains Hello World! """ assert_bindings( schema="msData/element/elemQ021.xsd", instance="msData/element/elemQ021.xml", class_name="Root", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
5,331,717
def possibly_equal(first, second): """Equality comparison that propagates uncertainty. It represents uncertainty using its own function object.""" if first is possibly_equal or second is possibly_equal: return possibly_equal #Propagate the possibilities return first == second
5,331,718
def main(): """ Main routine """ # command-line arguments opt = command_line_parser() # do main task do_task(opt)
5,331,719
def get_logs(): """ Endpoint used by Slack /logs command """ req = request.values logger.info(f'Log request received: {req}') if not can_view_logs(req['user_id']): logger.info(f"{req['user_name']} attempted to view logs and was denied") return make_response("You are not authorized to do that.", 200) url = get_temporary_url(req['user_id'], req['text']) logger.info(f"Created log URL for {req['user_name']} : {url.url}") return make_response(f'{request.host_url}logs/{url.url}', 200)
5,331,720
def vn_test(): """Test 'vn' population model""" _, param_file = tempfile.mkstemp(suffix='.json') _, db_file = tempfile.mkstemp(suffix='.hdf5') test_params = { "files": { "reference_file": mitty.tests.test_fasta_genome_file, "dbfile": db_file }, "rng": { "master_seed": 12345 }, "population_model": { "vn": { "p_vx": 0.2, "p_vn": [0.1, 0.5, 0.9] } }, "chromosomes": [1], "variant_models": [ { "snp": { "p": 0.01 } } ] } json.dump(test_params, open(param_file, 'w')) runner = CliRunner() result = runner.invoke(genomes.cli, ['generate', param_file]) assert result.exit_code == 0, result assert os.path.exists(db_file) pop = vr.Population(fname=db_file, mode='r', in_memory=False) # ml = pop.get_variant_master_list(chrom=4) chrom_idx_vx = pop.get_sample_variant_index_for_chromosome(1, 'vx') idx_vx = [set([i[0] for i in chrom_idx_vx if i[1] != c]) for c in [1, 0]] idx_vn = [] for v in ['v0', 'v1', 'v2']: chrom_idx = pop.get_sample_variant_index_for_chromosome(1, v) idx_vn.append([set([i[0] for i in chrom_idx if i[1] != c]) for c in [1, 0]]) for n in range(len(idx_vn) - 1): for cpy in [0, 1]: assert len(idx_vx[cpy]) > 0, idx_vx[cpy] assert abs(len(idx_vn[n][cpy].intersection(idx_vn[n + 1][cpy])) - len(idx_vn[n][cpy])) < 0.05 * len(idx_vn[n][cpy]) assert abs(len(idx_vx[cpy].intersection(idx_vn[n][cpy])) - len(idx_vx[cpy].intersection(idx_vn[n + 1][cpy]))) < 0.05 * len(idx_vx[cpy]) os.remove(param_file) os.remove(db_file)
5,331,721
def list_volumes(vg): """List logical volumes paths for given volume group. :param vg: volume group name :returns: Return a logical volume list for given volume group : Data format example : ['volume-aaa', 'volume-bbb', 'volume-ccc'] """ out, err = utils.execute('lvs', '--noheadings', '-o', 'lv_name', vg, run_as_root=True) return [line.strip() for line in out.splitlines()]
5,331,722
def question_aligned_passage_embedding(question_lstm_outs, document_embeddings, passage_aligned_embedding_dim): """create question aligned passage embedding. Arguments: - question_lstm_outs: The dimension of output of LSTM that process question word embedding. - document_embeddings: The document embeddings. - passage_aligned_embedding_dim: The dimension of passage aligned embedding. """ def outer_sentence_step(document_embeddings, question_lstm_outs, passage_aligned_embedding_dim): """step function for PaddlePaddle's recurrent_group. In this function, the original input document_embeddings are scattered from nested sequence into sequence by recurrent_group in PaddlePaddle. The step function iterates over each sentence in the document. Arguments: - document_embeddings: The word embeddings of the document. - question_lstm_outs: The dimension of output of LSTM that process question word embedding. - passage_aligned_embedding_dim: The dimension of passage aligned embedding. """ def inner_word_step(word_embedding, question_lstm_outs, question_outs_proj, passage_aligned_embedding_dim): """ In this recurrent_group, sentence embedding has been scattered into word embeddings. The step function iterates over each word in one sentence in the document. Arguments: - word_embedding: The word embeddings of documents. - question_lstm_outs: The dimension of output of LSTM that process question word embedding. - question_outs_proj: The projection of question_lstm_outs into a new hidden space. - passage_aligned_embedding_dim: The dimension of passage aligned embedding. 
""" doc_word_expand = paddle.layer.expand( input=word_embedding, expand_as=question_lstm_outs, expand_level=paddle.layer.ExpandLevel.FROM_NO_SEQUENCE) weights = paddle.layer.fc( input=[question_lstm_outs, doc_word_expand], size=1, bias_attr=False, act=paddle.activation.SequenceSoftmax()) weighted_candidates = paddle.layer.scaling( input=question_outs_proj, weight=weights) return paddle.layer.pooling( input=weighted_candidates, pooling_type=paddle.pooling.Sum()) question_outs_proj = paddle.layer.fc( input=question_lstm_outs, bias_attr=False, size=passage_aligned_embedding_dim) return paddle.layer.recurrent_group( input=[ paddle.layer.SubsequenceInput(document_embeddings), paddle.layer.StaticInput(question_lstm_outs), paddle.layer.StaticInput(question_outs_proj), passage_aligned_embedding_dim, ], step=inner_word_step, name="iter_over_word") return paddle.layer.recurrent_group( input=[ paddle.layer.SubsequenceInput(document_embeddings), paddle.layer.StaticInput(question_lstm_outs), passage_aligned_embedding_dim ], step=outer_sentence_step, name="iter_over_sen")
5,331,723
def lm_loss_fn(forward_fn, vocab_size, params, rng, data, is_training=True): """Compute the loss on data wrt params.""" logits = forward_fn(params, rng, data, is_training) targets = hk.one_hot(data['target'], vocab_size) assert logits.shape == targets.shape mask = jnp.greater(data['obs'], 0) loss = -jnp.sum(targets * jax.nn.log_softmax(logits), axis=-1) loss = jnp.sum(loss * mask) / jnp.sum(mask) return loss
5,331,724
def ParseArguments(): """Parse command line arguments, validate them, and return them. Returns: A dict of: {'args': argparse arguments, see below, 'cur_ip': the ip address of the target, as packed binary, 'cur_node_index': the current node index of the target, 'cur_node_name': the current node name of the target, 'new_ip': the new ip address of the target, as packed binary, 'new_node_index': the new node index of the target, 'new_node_name': the new node name of the target, 'update_type': the update type, a shortname of update_type_helper } Raises: RuntimeError: if run from outside the Makani workspace without specifying --tms570_bin. ValueError: if the binary the user supplied doesn't match the target type. ValueError: if user passes --dump_image without a .elf file. ValueError: if update is a param type, but the file doesn't end in '.bin'. ValueError: if the update type in the filename isn't recognized. ValueError: if the update type is 'CalibParams' but we don't see --calib. ValueError: if the update type is not 'CalibParams' but we see --calib. ValueError: if the update type is 'SerialParams' but we don't see --serial. ValueError: if the update type is not 'SerialParams' but we see --serial. ValueError: if the update type is 'CarrierSerialParams' but we don't see --serial. ValueError: if the update type is not 'CarrierSerialParams' but we see --serial. """ parser = argparse.ArgumentParser( description='Burn an application or parameter set to a board.') parser.add_argument( '--target', help='board to burn, e.g. 
MOTOR_PBO or FC_A.', required=True) parser.add_argument('file', help='binary to burn, e.g motor_application.elf ' 'or servo_config_params.bin') parser.add_argument('--dump_image', action='store_true', help='Output intermediate .bin file instead of' ' sending it to the device.') parser.add_argument('--calib', action='store_true', help='Add this flag to burn calibration parameters.') parser.add_argument('--serial', action='store_true', help='Add this flag to burn serial parameters.') parser.add_argument('--carrier_serial', action='store_true', help='Add this flag to burn carrier serial' ' parameters.') parser.add_argument('--config', action='store_true', help='Add this flag to burn config parameters.') parser.add_argument('--bootloader', action='store_true', help='Add this flag to burn a bootloader.') parser.add_argument('--override_target', help='Override target identity in bootloader image.') parser.add_argument('--force_hardware', help='Burn e.g. an Fc board, rather than an Aio board.\n' 'use with argument "new" or "old".') parser.add_argument('--ignore_mismatch', action='store_true', help='Ignore mismatch between binary and board app type, ' 'ip address, etc.') # TODO: Allow override of IP address. args = parser.parse_args() args.application = not (args.calib or args.serial or args.carrier_serial or args.config or args.bootloader) if (args.calib + args.serial + args.carrier_serial + args.config + args.bootloader + args.application) != 1: raise ValueError('Cannot specify more than one update type (calib, serial, ' 'carrier_serial, config, or bootloader).') if args.force_hardware and not ParseHardwareType(args.force_hardware): raise ValueError('Unknown hardware type "%s"; please specify a valid ' 'HardwareType.' 
% args.force_hardware) target_info = GetTargetInfo(args.target) file_info = GetInfoFromFileName(os.path.basename(args.file)) if args.dump_image and not args.file.endswith('.elf'): raise ValueError('--dump_image requires an .elf file.') if args.calib and file_info['update_type'] != 'CalibParams': raise ValueError('That does not look like an calib param file to me.') if file_info['update_type'] == 'CalibParams' and not args.calib: raise ValueError('If you really want to burn calib params, pass --calib.') if args.serial and file_info['update_type'] != 'SerialParams': raise ValueError('That does not look like an serial param file to me.') if file_info['update_type'] == 'SerialParams' and not args.serial: raise ValueError('If you really want to burn serial params, pass --serial.') if args.carrier_serial and file_info['update_type'] != 'CarrierSerialParams': raise ValueError('That does not look like a carrier serial param' ' file to me.') if (file_info['update_type'] == 'CarrierSerialParams' and not args.carrier_serial): raise ValueError('If you really want to burn carrier serial params,' ' pass --carrier_serial.') if args.bootloader and file_info['update_type'] != 'Bootloader': raise ValueError('That does not look like a bootloader file to me.') if file_info['update_type'] == 'Bootloader' and not args.bootloader: raise ValueError( 'If you really want to burn a bootloader, pass --bootloader.') if args.override_target and file_info['update_type'] != 'Bootloader': raise ValueError('--override_target only supported with --bootloader.') if args.override_target: new_target_info = GetTargetInfo(args.override_target) else: new_target_info = target_info logging.info('Attempting to flash %s segment on target %s [%s, index %d].', file_info['update_type'], target_info['node_name'], target_info['ip_address'], target_info['node_index']) logging.info('Flashing file %s.', args.file) return {'args': args, 'cur_ip': target_info['ip_address'], 'cur_node_index': target_info['node_index'], 
'cur_node_name': target_info['node_name'], 'new_ip': new_target_info['ip_address'], 'new_node_index': new_target_info['node_index'], 'new_node_name': new_target_info['node_name'], 'file': args.file, 'update_type': file_info['update_type'], }
5,331,725
def chroms_from_build(build): """ Get list of chromosomes from a particular genome build Args: build str Returns: chrom_list list """ chroms = {'grch37': [str(i) for i in range(1, 23)], 'hg19': ['chr{}'.format(i) for i in range(1, 23)] # chroms = {'grch37': [i for i in range(1, 23)] + ['X', 'Y'], } try: return chroms[build] except KeyError: raise ValueError("Oops, I don't recognize the build {}".format(build))
5,331,726
def get_ready_count_string(room: str) -> str: """Returns a string representing how many players in a room are ready. Args: room (str): The room code of the players. Returns: str: A string representing how many players in a room are ready in the format '[ready]/[not ready]'. """ player_count = 0 ready_count = 0 players = get_players(room) for player in players: if player.is_alive: player_count += 1 if player.ready: ready_count += 1 return f'{ready_count}/{player_count}'
5,331,727
def add_object_to_session(object, session): """Explicitly add object to the session.""" if session and object: session.add(object)
5,331,728
def switches(topology: 'Topology') -> List['Node']: """ @param topology: @return: """ return filter_nodes(topology, type=DeviceType.SWITCH)
5,331,729
def geometric_progression(init, ratio): """ Generate a geometric progression start form 'init' and multiplying 'ratio'. """ return _iterate(lambda x: x * ratio, init)
5,331,730
def resolve(marathon_lb_url): """Return the individual URLs for all available Marathon-LB instances given a single URL to a DNS-balanced Marathon-LB cluster. Marathon-LB typically uses DNS for load balancing between instances and so the address provided by the user may actually be multiple load-balanced instances. This function uses DNS to lookup the hostnames (IPv4 A-records) of each instance, returning them all to the caller for use as required. """ url = urllib.parse.urlparse(marathon_lb_url) all_hosts = _get_alias_records(url.hostname) resolved_urls = _reassemble_urls(url, all_hosts) return resolved_urls
5,331,731
def save_data(data, filename, save_path): """Saves the dataset in a given file path""" if not os.path.exists(save_path): os.mkdir(save_path) np.save(os.path.join(save_path, filename), data)
5,331,732
def copy_masked(src, dst, srcval=None, srcmask=None, dstmask=None):
    """
    Copies masked elements from the :samp:`src` array into the :samp:`dst` array.
    If the arrays have different layouts/shapes, :samp:`src` is first copied to
    an array with the same layout/shape as :samp:`dst`. By default, elements of
    the copied :samp:`src` array which are not in the original input :samp:`src`
    global domain are set to the mask value (i.e. elements outside the input
    :samp:`src` array are assumed to be masked). Only non-halo elements are copied.

    :type src: :obj:`Dds`
    :param src: Array from which masked voxels are copied.
    :type dst: :obj:`Dds`
    :param dst: Array to which masked voxels are copied.
    :type srcval: numeric
    :param srcval: Value for elements outside the :samp:`src` global domain.
       If :samp:`None`, set to :samp:`src.mtype.maskValue()`.
    :type srcmask: numeric
    :param srcmask: Mask value of :samp:`src`; defaults to :samp:`src.mtype.maskValue()`.
    :type dstmask: numeric
    :param dstmask: Mask value of :samp:`dst`; defaults to :samp:`dst.mtype.maskValue()`.
    """
    srcMskVal = srcmask
    dstMskVal = dstmask
    srcMtype = None
    srcDtype = src.dtype
    # Prefer the source's mtype-derived mask value when none was given.
    if (hasattr(src, "mtype") and (src.mtype != None)):
        srcMtype = src.mtype
        srcDtype = src.dtype
        if (srcMskVal is None):
            srcMskVal = src.mtype.maskValue()
    if (srcMskVal is None):
        raise Exception("Source Dds object does not have a non-None mtype attribute required to deterime mask value.")
    # Same fallback for the destination mask value.
    if ((dstMskVal is None) and hasattr(dst, "mtype") and (dst.mtype != None)):
        dstMskVal = dst.mtype.maskValue()
    if (dstMskVal is None):
        raise Exception("Destination Dds object does not have a non-None mtype attribute required to deterime mask value.")
    if (srcval is None):
        srcval = srcMskVal
    # Re-distribute src onto dst's sub-domain decomposition when they differ;
    # out-of-domain elements are pre-filled with srcval before the copy.
    if (not have_same_subd_decomp(src, dst)):
        newSrc = mango.empty_like(dst, mtype=srcMtype, dtype=srcDtype)
        newSrc.setAllToValue(newSrc.dtype.type(srcval))
        newSrc.fill(src)
        src = newSrc
    _mango_open_core_so._copy_masked_voxels(src, dst, srcMskVal, dstMskVal)
5,331,733
def _parse_bluetooth_info(data): """ """ # Combine the bytes as a char string and then strip off extra bytes. name = ''.join(chr(i) for i in data[:16]).partition('\0')[0] return BluetoothInfo(name, ''.join(chr(i) for i in data[16:28]), ''.join(chr(i) for i in data[29:]))
5,331,734
async def get_reverse_objects_topranked_for_lst(entities): """ get pairs that point to the given entity as the primary property primary properties are those with the highest rank per property """ # run the query res = await runQuerySingleKey(cacheReverseObjectTop, entities, """ SELECT ?base ?prop ?parent WHERE { VALUES ?base { %s } ?parent ?prop ?base . FILTER( ?prop NOT IN (""" + ex_cls + """) ) # exclude wikilinks and redirects } LIMIT """ + str(config.RESULTS_LIMIT) + """ """) return res
5,331,735
def LU_razcep(A):
    """
    Return the in-place LU factorization of ``A`` stored as ``[L\\U]``.

    Gaussian elimination without pivoting: the multipliers (the strict lower
    triangle of L) overwrite the eliminated entries, while U fills the upper
    triangle. Columns with a zero pivot are skipped.
    """
    n = len(A)
    for p in range(n - 1):
        pivot = A[p]
        for row in A[p + 1:]:
            if pivot[p]:
                m = row[p] / pivot[p]
                row[p:] = row[p:] - pivot[p:] * m
                row[p] = m
    return A
5,331,736
def jni_request_identifiers_for_type(field_type, field_reference_name, field_name, object_name="request"): """ Generates jni code that defines C variable corresponding to field of java object (dto or custom type). To be used in request message handlers. :param field_type: type of the field to be initialized (as defined in vpe.api) :param field_reference_name: name of the field reference in generated code :param field_name: name of the field (camelcase) :param object_name: name of the object to be initialized """ # field identifiers jni_type = util.vpp_2_jni_type_mapping[field_type] jni_signature = util.jni_2_signature_mapping[field_type] jni_getter = util.jni_field_accessors[field_type] # field identifier return request_field_identifier_template.substitute( jni_type=jni_type, field_reference_name=field_reference_name, field_name=field_name, jni_signature=jni_signature, jni_getter=jni_getter, object_name=object_name)
5,331,737
def _ValidateDuration(arg_internal_name, arg_value):
    """Validates an argument which should have a Duration value.

    Args:
        arg_internal_name: internal name of the argument (for error reporting).
        arg_value: the candidate value; a string or an int number of seconds.

    Returns:
        The parsed duration from TIMEOUT_PARSER.

    Raises:
        InvalidArgException: if parsing fails or the value has another type.
    """
    try:
        # Python 3: `basestring` no longer exists; `str` covers all strings.
        if isinstance(arg_value, str):
            return TIMEOUT_PARSER(arg_value)
        elif isinstance(arg_value, int):
            return TIMEOUT_PARSER(str(arg_value))
    except arg_parsers.ArgumentTypeError as e:
        # Python 3: exceptions have no `.message` attribute; use str(e).
        raise InvalidArgException(arg_internal_name, str(e))
    raise InvalidArgException(arg_internal_name, arg_value)
5,331,738
def get_jaccard_dist1(y_true, y_pred, smooth=default_smooth):
    """Helper to get Jaccard distance (1 - Jaccard index) for loss functions.

    Note: This mirrors what others in the ML community have been using even
    for non-binary vectors.
    """
    return 1 - get_jaccard_index1(y_true, y_pred, smooth)
5,331,739
def deduplicate_obi_codes(fname: Path) -> None:
    """
    Remove duplicate http://terminology.hl7.org/CodeSystem/v2-0203#OBI codes
    from an instance.

    When using the Medizininformatik Initiative Profile LabObservation, SUSHI
    v2.1.1 inserts the identifier.type code for
    http://terminology.hl7.org/CodeSystem/v2-0203#OBI twice, but it has a
    cardinality of 1, resulting in an error by the FHIR validator. This
    workaround function actively removes the duplicates.

    MII Profile:
    https://www.medizininformatik-initiative.de/fhir/core/modul-labor/StructureDefinition/ObservationLab

    :param fname: Filename of instance to remove duplicates from
    :return: None
    """

    def num_obi_codes(json_data: Dict):
        # Count codings with system v2-0203 and code OBI via JSONPath.
        jp = parse(
            "$.type.coding[?code = 'OBI' & system='http://terminology.hl7.org/CodeSystem/v2-0203']"
        )
        return len(jp.find(json_data))

    def del_obi_codes(identifier: Dict):
        # Drop the first matching OBI coding; one removal per call.
        codings = identifier["type"]["coding"]
        for i, coding in enumerate(codings):
            if (
                coding["system"] == "http://terminology.hl7.org/CodeSystem/v2-0203"
                and coding["code"] == "OBI"
            ):
                del codings[i]
                break

    # Context managers close the file handles deterministically; the previous
    # json.load(open(...)) / json.dump(..., open(...)) pattern leaked them.
    with open(fname) as fp:
        json_data = json.load(fp)
    if "identifier" not in json_data:
        return
    for identifier in json_data["identifier"]:
        if num_obi_codes(identifier) > 1:
            warnings.warn(f"Found multiple OBI codes in {fname}, removing")
            del_obi_codes(identifier)
    with open(fname, "w") as fp:
        json.dump(json_data, fp, indent=2)
5,331,740
def fold_conv_bns(onnx_file: str) -> onnx.ModelProto:
    """
    When a batch norm op is the only child operator of a conv op, this function
    will fold the batch norm into the conv and return the processed graph.

    :param onnx_file: file path to ONNX model to process
    :return: A loaded ONNX model with BatchNormalization ops folded into Conv
        ops where possible, or ``None`` when no fold was performed.
    """
    model = onnx.load(onnx_file)
    conv_nodes = [n for n in model.graph.node if n.op_type == "Conv"]
    graph_modified = False
    for conv_node in conv_nodes:
        conv_output = conv_node.output[0]
        # Consumers of this conv's output tensor.
        child_nodes = [n for n in model.graph.node if conv_output in n.input]
        # Check if the only child of the conv output is a batch norm op;
        # folding with multiple consumers would change the other branches.
        if len(child_nodes) == 1 and child_nodes[0].op_type == "BatchNormalization":
            bn_node = child_nodes[0]
            fold_performed = _fold_conv_bn(model, conv_node, bn_node)
            graph_modified = fold_performed or graph_modified
    return model if graph_modified else None
5,331,741
def numdays(year, month):
    """
    Return the number of days in the given month of the given year.

    Args:
        year: calendar year (non-negative)
        month: month number, 1..12

    Returns:
        ndays: number of days in the month, accounting for leap-year February
    """
    DAYS_PER_MONTH = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    assert(year >= 0)
    assert(1 <= month and month <= 12)
    # February gains a day in leap years.
    extra = 1 if (month == 2 and leapyear(year)) else 0
    return DAYS_PER_MONTH[month - 1] + extra
5,331,742
def normalise_diversity_year_df(y_div_df):
    """Normalises a dataframe with diversity information by year and parametre set"""
    normalised_frames = []
    # For each diversity metric, pivot over parametre sets and z-score the
    # resulting per-parametre-set series.
    for metric in set(y_div_df["diversity_metric"]):
        wide = y_div_df.query(f"diversity_metric == '{metric}'").pivot_table(
            index=["year", "diversity_metric"],
            columns="parametre_set",
            values="score",
        )
        normalised_frames.append(wide.apply(zscore))
    # Concatenate and melt back to long form so it can be visualised with altair.
    return (
        pd.concat(normalised_frames)
        .reset_index(drop=False)
        .melt(
            id_vars=["year", "diversity_metric"],
            var_name="parametre_set",
            value_name="score",
        )
    )
5,331,743
def usage(parser):
    """Print the CLI help plus an invocation example, then exit."""
    parser.print_help()
    example = "\t" + sys.argv[0] + " --bridgeip 192.168.1.23 --lights Light1,Light2"
    print("Example:")
    print(example)
    sys.exit()
5,331,744
def test_contains_valid_chars():
    """ Test that _contains_valid_chars works """
    cases = {
        "metric_name tag-key.str": False,
        u"&*)": False,
        "abc.abc-abc/abc_abc": True,
        " ": False,
        u'日本語.abc': True,
        u'abc.日本語': True
    }
    # Each candidate string must validate to the expected boolean.
    for candidate, expected in cases.items():
        outcome = _contains_valid_chars(candidate)
        assert_equals(outcome, expected,
                      "Validation failed for,'" + candidate + "'")
5,331,745
def allowed_file(filename):
    """Does filename have the right extension?"""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
5,331,746
def validate_fetch_params(max_fetch: int, max_events_fetch: int, fetch_events: bool, first_fetch: str,
                          event_types: List[str]) -> None:
    """
    Validates the parameters for fetch incident command.

    Args:
        max_fetch: (int): The maximum number of incidents for one fetch.
        max_events_fetch (int) The maximum number of events per incident for one fetch.
        fetch_events (bool): Whether or not fetch events when fetching incident.
        first_fetch: (str): First fetch time in words.
        event_types (List[str]): Event types to fetch; must be a list.
    """
    if first_fetch:
        arg_to_datetime(first_fetch)  # verify that it is a date.
    if max_fetch > MAX_FETCH:
        return_error(f'The Maximum number of incidents per fetch should not exceed {MAX_FETCH}.')
    if fetch_events and max_events_fetch > MAX_EVENTS_FETCH:
        return_error(
            f'The Maximum number of events for each incident per fetch should not exceed {MAX_EVENTS_FETCH}.'
        )
    if not isinstance(event_types, list):
        return_error('The fetched event types must be a list.')
5,331,747
def render(
    template: str,
    context: Dict,
    serializer: Optional[CallableType[[Any], str]] = None,
    partials: Optional[Dict] = None,
    missing_variable_handler: Optional[CallableType[[str, str], str]] = None,
    missing_partial_handler: Optional[CallableType[[str, str], str]] = None,
    cache_tokens: bool = False,
) -> str:
    """Render a mustache template.

    Walks the token stream produced by ``tokenize``, maintaining a context
    stack (for sections/iteration) and an environment stack (section name,
    re-entry pointer, and iteration state). Partials are rendered by a
    recursive call with the partial's indentation applied per line.
    """
    serializer = serializer or default_serializer
    missing_variable_handler = missing_variable_handler or missing_variable_default
    missing_partial_handler = missing_partial_handler or missing_partial_default
    partials = partials or {}
    output: str = ''
    context_stack: List = [context]
    env_stack: List = []
    left_delimiter: str = '{{'
    right_delimiter: str = '}}'
    pointer: int = 0
    tokens = []
    if cache_tokens:
        # Pre-tokenize once; pointer then indexes into this token list.
        tokens = list(tokenize(template, 0, left_delimiter, right_delimiter))
    while True:
        if cache_tokens:
            try:
                (token, value, indentation), position_pointer = tokens[pointer]
                pointer += 1
            except IndexError:
                break
        else:
            # Lazy tokenization: pointer is a character offset into template.
            try:
                (token, value, indentation), pointer = next(
                    tokenize(template, pointer, left_delimiter, right_delimiter)
                )
                position_pointer = pointer
            except StopIteration:
                break
        current_context = context_stack[-1]
        if token is Token.SET_DELIMITER:
            # {{=<% %>=}} style delimiter changes apply from here onward.
            new_delimiters = value.strip().split(' ')
            left_delimiter = new_delimiters[0]
            right_delimiter = new_delimiters[-1]
        if token is Token.END:
            current_env = env_stack[-1]
            context_stack.pop()
            env_name, env_pointer, [env_var, _] = current_env
            if should_iterate(env_var):
                # Advance the section's iteration index; jump back to the
                # section start while items remain.
                current_env[2][1] += 1
                try:
                    next_item = env_var[current_env[2][1]]
                    context_stack.append(next_item)
                    pointer = env_pointer
                    continue
                except IndexError:
                    pass
            if env_name != value:
                raise MustacheSyntaxError.from_template_pointer(
                    f'Unexpected section end tag on line {{line_number}}. Expected "{env_name}" got "{value}"',
                    template,
                    position_pointer,
                )
            env_stack.pop()
        # Inside a falsy section: skip output, but still track nested
        # sections so their END tags balance.
        if not current_context and len(context_stack) != 1:
            if token in [Token.SECTION, Token.INVERTED]:
                context_stack.append(False)
                env_stack.append([value, pointer, [False, 0]])
            continue
        if token in [Token.NO_ESCAPE, Token.VARIABLE, Token.SECTION, Token.INVERTED]:
            try:
                variable = get_from_context(context_stack, value)
            except MissingVariable:
                variable = missing_variable_handler(
                    value, f'{left_delimiter} {value} {right_delimiter}'
                )
        else:
            variable = None
        if token is Token.LITERAL:
            output += value
        elif token is Token.NO_ESCAPE:
            output += serializer(variable)
        elif token is Token.VARIABLE:
            output += escape(serializer(variable))
        elif token in [Token.SECTION, Token.INVERTED]:
            if token is Token.INVERTED:
                variable = not variable
            if should_iterate(variable):
                # Iterable section: push the first item, or False when empty.
                try:
                    context_item = variable[0]
                    context_stack.append(context_item)
                except IndexError:
                    context_stack.append(False)
            else:
                context_stack.append(variable)
            env_stack.append([value, pointer, [variable, 0]])
        elif token is Token.PARTIAL:
            partial_template = partials.get(value)
            # potentially raise error here
            if partial_template is None:
                partial_template = missing_partial_handler(
                    value, f'{left_delimiter} {value} {right_delimiter}'
                )
            if partial_template != '':
                # Re-indent every line of the partial to the tag's column,
                # dropping the indentation added after a trailing newline.
                remove_trailing_indentation = False
                if partial_template.endswith('\n'):
                    remove_trailing_indentation = True
                partial_template = indentation + f'\n{indentation}'.join(
                    partial_template.split('\n')
                )
                if remove_trailing_indentation:
                    partial_template = partial_template[: -len(indentation)]
                partial_output = render(
                    partial_template, current_context, serializer=serializer, partials=partials
                )
                output += partial_output
    return output
5,331,748
def preprocess_observations(input_observation, prev_processed_observation, input_dimensions):
    """Convert the 210x160x3 uint8 frame into a 6400-float difference vector.

    Args:
        input_observation: raw Atari frame.
        prev_processed_observation: previous processed 6400-vector, or None on
            the first frame.
        input_dimensions: length of the flattened frame (6400).

    Returns:
        tuple: (difference vector, current processed frame to carry forward).
    """
    processed_observation = input_observation[35:195]  # crop to the play area
    processed_observation = downsample(processed_observation)
    processed_observation = remove_color(processed_observation)
    processed_observation = remove_background(processed_observation)
    processed_observation[processed_observation != 0] = 1  # everything else (paddles, ball) just set to 1
    # Convert from 80 x 80 matrix to 6400 x 1 matrix.
    # np.float was removed in NumPy 1.24; the builtin float is the same dtype.
    processed_observation = processed_observation.astype(float).ravel()
    # subtract the previous frame from the current one so we are only
    # processing changes in the game
    if prev_processed_observation is not None:
        input_observation = processed_observation - prev_processed_observation
    else:
        input_observation = np.zeros(input_dimensions)
    # store the previous frame so we can subtract from it next time
    prev_processed_observations = processed_observation
    return input_observation, prev_processed_observations
5,331,749
def expand_configuration(configuration):
    """Fill up backups with defaults."""
    backups = configuration['backups']
    for backup in backups:
        for field in _FIELDS:
            # Leave explicitly-set (non-None) values alone.
            if backup.get(field) is not None:
                continue
            # Fall back to the top-level value, or None when absent there too.
            backup[field] = configuration.get(field)
    return backups
5,331,750
def create_feature_extractor(input_shape: tuple, dropout:float=0.3, kernel_size:tuple=(3,3,3)) -> tf.keras.Sequential:
    """
    Create the 3D-conv feature extracting model: three strided Conv3D layers
    followed by dropout.

    :param input_shape: shape of input Z, X, Y, channels
    :param dropout: dropout rate applied after the conv stack
    :param kernel_size: kernel size shared by all Conv3D layers
    :return: feature extracting model
    """
    model = Sequential()
    # Filter count doubles per layer (4 -> 8 -> 16); stride 2 halves each dim.
    for idx, filters in enumerate((4, 8, 16)):
        layer_kwargs = dict(filters=filters,
                            kernel_size=kernel_size,
                            padding='same',
                            activation='relu',
                            strides=(2, 2, 2))
        if idx == 0:
            layer_kwargs['input_shape'] = input_shape
        model.add(Conv3D(**layer_kwargs))
    model.add(Dropout(dropout))
    return model
5,331,751
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Set up the Keenetic router component from a config entry."""
    hass.data.setdefault(DOMAIN, {})
    async_add_defaults(hass, config_entry)

    router = KeeneticRouter(hass, config_entry)
    await router.async_setup()

    # Re-run setup when the entry's options change; keep the unsubscribe
    # callback so unload can undo it later.
    undo_listener = config_entry.add_update_listener(update_listener)

    hass.data[DOMAIN][config_entry.entry_id] = {
        ROUTER: router,
        UNDO_UPDATE_LISTENER: undo_listener,
    }

    # Forward the entry to each supported platform (device_tracker, sensor, ...).
    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, platform)
        )

    return True
5,331,752
def log_set_level(client, level):
    """Set log level via the RPC client.

    Args:
        client: RPC client exposing ``call(method, params)``.
        level: log level we want to set. (for example "DEBUG")

    Returns:
        The result of the RPC call.
    """
    return client.call('log_set_level', {'level': level})
5,331,753
def exec_local_command(cmd):
    """
    Executes a command in the local shell and returns stdout.

    Args:
        cmd: command as a string (split on whitespace; no shell quoting).

    Returns:
        STDOUT of the command, as bytes.

    Raises:
        subprocess.CalledProcessError: in case of a non-zero return code.
    """
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, error = proc.communicate()
    retcode = proc.poll()
    if retcode:
        LOG.error("{0} returned status {1}: {2}".format(cmd, retcode, error))
        # CalledProcessError requires returncode and cmd arguments; the bare
        # constructor call in the old code raised TypeError instead.
        raise subprocess.CalledProcessError(retcode, cmd, output=output, stderr=error)
    return output
5,331,754
def OGH(p0, p1, v0, v1, t0, t1, t):
    """Evaluate an optimized geometric Hermite curve at parameter t.

    Endpoints p0/p1 with tangent directions v0/v1 over [t0, t1]; a0/a1 are
    the closed-form optimal tangent magnitudes and h0..h3 the cubic Hermite
    basis polynomials.

    NOTE(review): this also draws the chord and tangents via matplotlib as a
    side effect — confirm that is intended outside of demo code.
    """
    # Normalised parameter in [0, 1].
    s = (t-t0)/(t1-t0)
    # Optimal tangent magnitudes (least-squares solution for the OGH curve).
    a0 = (6*np.dot((p1-p0).T,v0)*np.dot(v1.T,v1) - 3*np.dot((p1-p0).T,v1)*np.dot(v0.T,v1)) / ((4*np.dot(v0.T,v0)*np.dot(v1.T,v1) - np.dot(v0.T,v1)*np.dot(v0.T,v1))*(t1-t0))
    a1 = (3*np.dot((p1-p0).T,v0)*np.dot(v0.T,v1) - 6*np.dot((p1-p0).T,v1)*np.dot(v0.T,v0)) / ((np.dot(v0.T,v1)*np.dot(v0.T,v1) - 4*np.dot(v0.T,v0)*np.dot(v1.T,v1))*(t1-t0))
    # Cubic Hermite basis functions evaluated at s.
    h0 = (2*s+1)*(s-1)*(s-1)
    h1 = (-2*s+3)*s*s
    h2 = (1-s)*(1-s)*s
    h3 = (s-1)*s*s
    # Plot the chord (dotted cyan) and the two tangents (green).
    plt.plot([p0[0],p1[0]], [p0[1],p1[1]], ':c')
    plt.plot([p0[0], (p0+v0)[0]], [p0[1], (p0+v0)[1]], '-g')
    plt.plot([p1[0], (p1+v1)[0]], [p1[1], (p1+v1)[1]], '-g')
    return h0*p0 + h1*p1 + h2*v0*a0 + h3*v1*a1
5,331,755
def permutation_test(v1, v2, iter=1000):
    """
    Conduct a permutation test on the difference of means.

    Parameters
    ----------
    v1 : array
        Vector 1.
    v2 : array
        Vector 2.
    iter : int. Default is 1000.
        The times for iteration.

    Returns
    -------
    p : float
        The permutation test result, p-value.
    """
    if len(v1) != len(v2):
        return "Invalid input"
    observed_diff = abs(np.average(v1) - np.average(v2))
    pooled = np.hstack((v1, v2))
    half = int(pooled.shape[0] / 2)
    extreme_count = 0
    for _ in range(iter):
        shuffled = np.random.permutation(pooled)
        # NOTE(review): the permuted difference is not taken as an absolute
        # value, so only one tail is counted — confirm this is intended.
        permuted_diff = np.average(shuffled[:half]) - np.average(shuffled[half:])
        if permuted_diff >= observed_diff:
            extreme_count = extreme_count + 1
    return np.float64(extreme_count / iter)
5,331,756
def registered_paths():
    """Return paths added via registration.

    ..note:: This returns a copy of the registered paths; mutating the
             returned list does not affect the registry itself.
    """
    return list(_registered_paths)
5,331,757
def second_pass_organizing_files(qc_path):
    """Second Pass at organizing qc txt files.

    Combines files with the same strategy, and merges derivative files for
    falff/alff (hp/lp suffixed) into their matching bandpass strategies.

    Parameters
    ----------
    qc_path : string
        existing path of qc_html directory

    Returns
    -------
    None
    """
    qc_files = os.listdir(qc_path)
    strat_dict = {}
    got_hp_lp = 0
    got_bp = 0
    for file_ in sorted(qc_files, reverse=True):
        if not ('.txt' in file_):
            continue
        str_ = file_
        file_ = os.path.join(qc_path, file_)
        # Normalise the strategy key embedded in the filename.
        str_ = str_.replace('qc_scan_', '')
        str_ = str_.replace('.txt', '')
        str_ = str_.replace('____', '_')
        str_ = str_.replace('___', '_')
        str_ = str_.replace('__', '_')
        fwhm_val_ = ''
        # organize all derivatives excluding alff falff
        if '_bandpass_freqs_' in str_:
            if not str_ in strat_dict:
                strat_dict[str_] = [file_]
            else:
                print('Error: duplicate keys for files in QC 2nd file_org ' \
                      'pass: %s %s' % (strat_dict[str_], file_))
                # A bare `raise` here had no active exception and produced a
                # confusing RuntimeError; raise an explicit error instead.
                raise ValueError('duplicate strategy key: %s' % str_)
        # organize alff falff
        elif ('_hp_' in str_) and ('_lp_' in str_):
            key_ = ''
            key_1 = ''
            hp_lp_ = ''
            if '_fwhm_' in str_:
                key_1 = ''
                key_, hp_lp_ = str_.split('_hp_')
                ignore, fwhm_val_ = hp_lp_.split('_fwhm_')
                hp_lp_ = '_hp_' + ignore
                key_1 = '_fwhm_' + fwhm_val_
            else:
                key_, hp_lp_ = str_.split('_hp_')
                hp_lp_ = '_hp_' + hp_lp_
            flag_ = 0
            # Iterate over a snapshot of the keys: the body deletes entries,
            # and mutating a dict during .keys() iteration raises RuntimeError
            # on Python 3.
            for key in list(strat_dict.keys()):
                if (key_ in key) and (key_1 in key):
                    append_to_files_in_dict_way(strat_dict[key], file_)
                    str_ = strat_dict[key][0].replace('.txt', '')
                    new_fname = str_ + hp_lp_ + '.txt'
                    os.system('mv %s %s' % (strat_dict[key][0], new_fname))
                    del strat_dict[key]
                    flag_ = 1
            if flag_ == 1:
                os.system('rm -f %s' % file_)
        else:
            if not str_ in strat_dict:
                strat_dict[str_] = [file_]
            else:
                print('Error: duplicate keys for files in QC 2nd file_org ' \
                      'pass: %s %s' % (strat_dict[str_], file_))
                # Same fix as above: explicit error instead of a bare raise.
                raise ValueError('duplicate strategy key: %s' % str_)
5,331,758
def nms_dynamic(ctx, g, boxes: Tensor, scores: Tensor, max_output_boxes_per_class: int, iou_threshold: float, score_threshold: float):
    """Rewrite symbolic function for default backend.

    Support max_output_boxes_per_class, iou_threshold, score_threshold of
    constant Tensor, which is aligned with ONNX's nms op.

    Args:
        ctx (ContextCaller): The context with additional information.
        g (Graph): The traced onnx graph.
        boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
        scores (Tensor): The detection scores of shape
            [N, num_boxes, num_classes].
        max_output_boxes_per_class (int): Maximum number of output boxes per
            class of nms.
        iou_threshold (float): IOU threshold of nms.
        score_threshold (float): score threshold of nms.

    Returns:
        NonMaxSuppression op for onnx.
    """
    # Wrap plain Python scalars into Constant nodes; ONNX NonMaxSuppression
    # expects all three thresholds as tensor inputs.
    if not sym_help._is_value(max_output_boxes_per_class):
        max_output_boxes_per_class = g.op(
            'Constant',
            value_t=torch.tensor(max_output_boxes_per_class, dtype=torch.long))
    if not sym_help._is_value(iou_threshold):
        iou_threshold = g.op(
            'Constant',
            value_t=torch.tensor([iou_threshold], dtype=torch.float))
    if not sym_help._is_value(score_threshold):
        score_threshold = g.op(
            'Constant',
            value_t=torch.tensor([score_threshold], dtype=torch.float))
    return g.op('NonMaxSuppression', boxes, scores, max_output_boxes_per_class,
                iou_threshold, score_threshold)
5,331,759
def api_timestamp_to_datetime(api_dt: Union[str, dict]):
    """Convert the datetime string returned by the API to a python datetime.

    The API emits 7 fractional-second digits plus a trailing 'Z', while
    ``%f`` accepts at most 6 digits — so the last digit and the 'Z' are
    cropped before parsing.

    :param api_dt: timestamp string, or a dict with a 'dateTime' key.
    :return: timezone-aware ``datetime`` localized to UTC.
    :raises TypeError: if *api_dt* is neither str nor dict.
    """
    if isinstance(api_dt, str):
        api_dt_str_mod = api_dt[:-2]
    elif isinstance(api_dt, dict):
        api_dt_str_mod = api_dt["dateTime"][:-2]
    else:
        # The bare `raise` here had no active exception and produced a
        # confusing RuntimeError; raise a descriptive TypeError instead.
        raise TypeError(f"expected str or dict, got {type(api_dt).__name__}")
    dt = datetime.strptime(api_dt_str_mod, "%Y-%m-%dT%H:%M:%S.%f")
    dt = pytz.utc.localize(dt)
    return dt
5,331,760
def calc_mutation(offsprings: List[List[List[int]]], mut_rate: float, genes_num: int) -> List[List[List[int]]]:
    """
    Optional mutation hook for the simulator.

    Returning a value other than ``None`` makes the simulator use it instead
    of the default mutation operator; returning ``None`` (as here) keeps the
    default behaviour.
    """
    return None
5,331,761
def compute_targets(ex_rois, gt_rois, weights=(1.0, 1.0, 1.0, 1.0)):
    """Compute bounding-box regression targets for an image.

    Returns the inverse box transform from ex_rois to gt_rois, cast to
    float32 without copying when possible.
    """
    return box_utils.bbox_transform_inv(ex_rois, gt_rois, weights).astype(
        np.float32, copy=False
    )
5,331,762
def dmp_gf_sqf_list(f, u, K, all=False):
    """Compute square-free decomposition of ``f`` in ``GF(p)[X]``.

    Placeholder: not implemented for multivariate polynomials over finite
    fields; always raises.
    """
    raise NotImplementedError('multivariate polynomials over finite fields')
5,331,763
def _args_filter(args):
    """
    Normalise execute() arguments to a list of tuples — the only bind
    argument shape the zenith db api accepts.
    """
    # Generators must be materialised before len()/indexing.
    if isinstance(args, (GeneratorType,)):
        args = list(args)
    if len(args) <= 0:
        return []
    first = args[0]
    if isinstance(first, (tuple, list)):
        # Already a sequence of rows: coerce each row to a tuple.
        return [tuple(row) for row in args]
    # A single flat row: wrap it as a one-element list.
    return [tuple(args)]
5,331,764
def test_in_range_above():
    """One page above current should be displayed."""
    current_page = 4
    assert within_filter(current_page + 1, current_page)
5,331,765
def getAreaDF(spark):
    """
    Returns a Spark DF containing the BLOCK geocodes and the Land and Water
    area columns.

    Parameters
    ==========
    spark : SparkSession

    Returns
    =======
    a Spark DF

    Notes
    =====
    - Converts the AREALAND and AREAWATER columns from square meters to
      square miles
    - Adds AREA_SQUARE_MILES as their sum
    - Used primarily for calculating Population Density
    """
    area_cols = ['AREALAND', 'AREAWATER']
    area = getGRFC(spark, columns=area_cols)
    for area_col in area_cols:
        # Cast the raw string columns to long before unit conversion.
        # NOTE(review): persisting every intermediate DF in the loop looks
        # redundant — confirm only the final persist is needed.
        area = area.withColumn(area_col, sf.col(area_col).cast("long")).persist()
        # calculation for converting square meters (current units for AREALAND
        # from the GRFC) to square miles
        # square miles = square meters / 2,589,988
        # https://www.census.gov/quickfacts/fact/note/US/LND110210
        area = area.withColumn(area_col, sf.col(area_col) / sf.lit(2589988)).persist()
    area = area.withColumn("AREA_SQUARE_MILES", sf.expr(" + ".join(area_cols))).persist()
    return area
5,331,766
def test_SteepCBPCTL(): """Test STEEP effect on a circumbinary planet (Fleming et al., 2018, ApJ, 858, 86), but with the CTL model (Graham et al., in prep).""" # Remove old log file subprocess.run(['rm', 'STEEP_CTL.log'], cwd=cwd) # Run vplanet subprocess.run(['vplanet', 'vpl.in', '-q'], cwd=cwd) # Grab the output output = GetOutput(path=cwd) # Run our comparisons assert np.isclose(output.log.final.cbp.FreeEcc, 0.030000) assert np.isclose(output.log.final.cbp.Eccentricity, 0.031100) assert np.isclose(output.log.final.cbp.SemiMajorAxis, 1.048835e+11) assert np.isclose(output.log.final.secondary.Eccentricity, 0.313818) assert np.isclose(output.log.final.secondary.SemiMajorAxis, 0.095744) assert np.isclose(output.log.final.secondary.CriticalSemiMajorAxis, 0.307611)
5,331,767
def run_command(command, filename=None, repeat=1, silent=False):
    """
    Run `command` with `filename` positional argument in the directory of
    the `filename`. If `filename` is not given, run only the command.

    Args:
        command: command string (split on whitespace — paths with spaces are
            not supported).
        filename: optional file passed as the last argument; the command runs
            in its directory.
        repeat: number of times to run the command.
        silent: when True, discard the command's stdout/stderr.

    Returns:
        0 on success, otherwise the first non-zero exit status seen.
    """
    if filename is not None:
        fdir = os.path.dirname(os.path.abspath(filename))
        cmd = command + ' ' + os.path.basename(filename)
    else:
        fdir = None
        cmd = command

    status = 0
    for _ in range(repeat):
        # subprocess.DEVNULL avoids manually opening/closing os.devnull.
        sink = subprocess.DEVNULL if silent else None
        st = subprocess.call(cmd.split(), cwd=fdir, stdout=sink, stderr=sink)
        status = status or st
    return status
5,331,768
def how_many():
    """Check current number of issues waiting in SQS."""
    # Reject requests that fail signature/token validation.
    if not is_request_valid(request):
        abort(400)
    lapdog_instance = Lapdog()
    # NOTE(review): the result of how_many() is discarded and the reply text
    # hardcodes "4 issues" — confirm the count should be interpolated here.
    lapdog_instance.how_many()
    return jsonify(
        response_type="in_channel",
        text="There are 4 issues waiting to be handled",
    )
5,331,769
def read_sbd(filepath):
    """Reads an .sbd file containing spectra in either profile or centroid mode.

    Returns:
        tuple: (header, meta, spectra) where header is the unpacked
        ``<BQB`` file header, meta is the list of per-spectrum ``<QLfHH``
        metadata records, and spectra is the list of spectra read with
        ``read_spectrum``.
    """
    with open(filepath, 'rb') as in_file:
        # 10-byte header: version byte, spectrum count (uint64), flags byte.
        header = struct.unpack("<BQB", in_file.read(10))
        meta_size = header[1] * 20  # sizeof(QLfHH)
        meta = [meta_item for meta_item in struct.iter_unpack("<QLfHH", in_file.read(meta_size))]
        # Second field of each metadata record is the point count.
        num_points = [meta_item[1] for meta_item in meta]
        spectra = [read_spectrum(in_file, n) for n in num_points]
        return (header, meta, spectra)
5,331,770
def dct2(X, blksize):
    """Calculate the blockwise 2D DCT of array ``X``.

    ``X`` is processed in non-overlapping ``blksize`` x ``blksize`` tiles;
    each tile ``b`` is transformed as ``D @ b @ D.T`` where ``D`` is the DCT
    matrix. 3-D inputs are transformed per channel.

    Note: trailing rows/columns beyond the last full block (when a dimension
    is not a multiple of ``blksize``) are left as zeros in the output.
    """
    dctm = dct_mat(blksize)
    output = sp.zeros(X.shape)
    # range end is shape+1 so the final full block (ending exactly at the
    # array edge) is included; with a plain `shape` end it was always skipped.
    if output.ndim == 3:
        for i in range(blksize, X.shape[0] + 1, blksize):
            for j in range(blksize, X.shape[1] + 1, blksize):
                for c in range(X.shape[2]):
                    b = X[i-blksize:i, j-blksize:j, c]
                    output[i-blksize:i, j-blksize:j, c] = sp.dot(sp.dot(dctm, b), dctm.T)
    elif output.ndim == 2:
        for i in range(blksize, X.shape[0] + 1, blksize):
            for j in range(blksize, X.shape[1] + 1, blksize):
                b = X[i-blksize:i, j-blksize:j]
                output[i-blksize:i, j-blksize:j] = sp.dot(sp.dot(dctm, b), dctm.T)
    return output
5,331,771
def print_filtering(dataset, filter_vec, threshold, meta_name):
    """Report and select under-represented batches or cell types.

    Prints an informative table (category, absolute and relative cell counts,
    exclusion flag) and returns the names of categories whose proportion of
    cells falls below *threshold*.
    """
    counts = filter_vec.value_counts(ascending=False)
    print("**", meta_name , "containing less than:", str(threshold), "of total cells are removed" +"\n" + "**", meta_name, "filtered based on our threshold")
    # Table describing the filtering about to be done.
    proportions = counts.values / dataset.n_obs
    exclude_df = pd.DataFrame({
        meta_name: counts.index.to_list(),
        'n_cells': counts.values,
        '%_cells': proportions,
        'Excluded_?': proportions < threshold,
    })
    print(exclude_df)
    return exclude_df[meta_name][exclude_df["Excluded_?"] == True].tolist()
5,331,772
def attempt_input_load(input_path):
    """Attempts to load the file at the provided path and return it as an
    array of lines. If the file does not exist we will exit the program
    since nothing useful can be done.

    Args:
        input_path: path to a UTF-8 text file.

    Returns:
        list[str]: the file's lines, newlines preserved.
    """
    import sys  # local import: sys is only needed for the exit path
    if not os.path.isfile(input_path):
        print("Input file does not exist: %s" % input_path)
        # sys.exit raises SystemExit explicitly; the bare exit() builtin is
        # injected by the site module and is not guaranteed to be available.
        sys.exit(1)
    print("Loading input from file: %s" % input_path)
    with open(input_path, "r", encoding='utf-8') as f:
        lines = f.readlines()
    return lines
5,331,773
def get_chunk_tags(chunks: Dict, attrs: str):
    """
    Build HTML script/link tags for webpack chunks.

    NOTE(review): despite the ``Dict`` annotation, *chunks* is iterated as a
    sequence of dicts with 'resource_type' and 'url' keys — confirm and fix
    the annotation upstream.

    :param chunks: chunk records, each with 'resource_type' ('js'/'css') and 'url'
    :param attrs: extra attribute string injected verbatim into each tag
    :return: list of rendered tag strings
    """
    tags = []
    for chunk in chunks:
        resource_type = chunk['resource_type']
        original_url = chunk['url']
        parse_result = urlparse(original_url)
        path = parse_result.path
        # If under STATIC_URL rewrite using static tag so that we respect
        # static file storage options, eg. ManifestStaticFileStorage
        if settings.STATIC_URL and path.startswith(settings.STATIC_URL):
            try:
                path = static(path[len(settings.STATIC_URL):])
            except ValueError:
                # Allow url's that aren't managed by static files - eg. this
                # will happen for ManifestStaticFileStorage if file is not in
                # the manifest
                pass
        url = ParseResult(**dict(parse_result._asdict(), path=path)).geturl()
        if resource_type == 'js':
            tags.append(f'<script type="text/javascript" src="{url}" {attrs}></script>')
        if resource_type == 'css':
            tags.append(f'<link type="text/css" href="{url}" rel="stylesheet" {attrs}/>')
    return tags
5,331,774
def __discount_PF(i, n):
    """
    Present worth factor.

    Factor:  (P/F, i, N)
    Formula: P = F(1+i)^-N

    :param i: interest rate per period
    :param n: number of periods
    :return: discount factor mapping a future value F to present value P

    Cash Flow:
                      F
                      |
                      |
        --------------
        |
        P
    """
    return (1 + i) ** (-n)
5,331,775
def update(upd_time):
    """Send the meal-menu notification to the users, then sleep *upd_time*.

    Intended to be called in a loop: depending on the current time it either
    sends lunch/dinner menus for the two canteens or refreshes the menus.
    """
    now = datetime.datetime.now()
    # NOTE(review): this is a SET of {hour, minute}, so order is lost —
    # e.g. 12:30 and 30:12 would compare equal; confirm against how
    # notification_lunch/notification_dinner are defined.
    curr_time = {now.time().hour, now.time().minute}

    # Supper or lunch
    have_to_send = ""

    # Error message
    err_msg = "Si è verificato un *errore* all'interno di @UnicamEatBot, controllare il *menù* del _giorno odierno_"

    if curr_time == notification_lunch:
        have_to_send = "Pranzo"
    elif curr_time == notification_dinner:
        have_to_send = "Cena"

    # Weekday index -> Italian day name, as used by the menu database.
    per_bene = {
        0: "Lunedì",
        1: "Martedì",
        2: "Mercoledì",
        3: "Giovedì",
        4: "Venerdì",
        5: "Sabato",
        6: "Domenica"
    }

    if have_to_send:
        # Get the day
        day_week_day = datetime.datetime.today().weekday()
        day = per_bene[day_week_day]

        # Sending to Avack users: lunch only, Monday-Thursday, canteen open.
        if (day == "Lunedì" or day == "Martedì" or day == "Mercoledì" or day == "Giovedì") and have_to_send == "Pranzo" and canteen_closed_da == False:
            canteen = "D'Avack"
            msg_menu = db.get_updated_menu(canteen, day, have_to_send)
            if msg_menu == "Error":
                # Menu generation failed: alert the admins instead of users.
                for chat_id in db.get_admins():
                    try:
                        bot.sendMessage(chat_id, err_msg, parse_mode="Markdown")
                    except telepot.exception.TelegramError as e:
                        if e.error_code == 400:
                            print(Fore.YELLOW + "[WARNING] Non sono riuscito ad inviare il messaggio a: " + chat_id)
            else:
                for chat_id in db.get_users_with_pref("notif_da", True):
                    print(Fore.YELLOW + "[SENDING AVACK] Sto inviando un messaggio a: " + chat_id)
                    keyboard = InlineKeyboardMarkup(inline_keyboard=[
                        [dict(text='Offrici una birra!', url="https://www.paypal.me/azzeccagarbugli")]])
                    # Prints the menu in a kawaii way
                    try:
                        bot.sendMessage(chat_id, msg_menu, parse_mode="Markdown", reply_markup=keyboard)
                    except telepot.exception.TelegramError as e:
                        if e.error_code == 400:
                            print(Fore.YELLOW + "[WARNING] Non sono riuscito ad inviare il messaggio a: " + chat_id)

        # Sending to ColleParadiso users
        # (closed for dinner on weekends when canteen_closed_cp is set)
        if (day == "Sabato" or day == "Domenica") and have_to_send == "Cena" and canteen_closed_cp == True:
            pass
        else:
            canteen = "Colle Paradiso"
            msg_menu = db.get_updated_menu(canteen, day, have_to_send)
            if msg_menu == "Error":
                # Same admin alert path as above.
                for chat_id in db.get_admins():
                    try:
                        bot.sendMessage(chat_id, err_msg, parse_mode="Markdown")
                    except telepot.exception.TelegramError as e:
                        if e.error_code == 400:
                            print(Fore.YELLOW + "[WARNING] Non sono riuscito ad inviare il messaggio a: " + chat_id)
            else:
                # Preference keys differ for lunch ("l") and dinner ("d").
                if have_to_send == "Pranzo":
                    l_or_d = "l"
                elif have_to_send == "Cena":
                    l_or_d = "d"
                for chat_id in db.get_users_with_pref("notif_cp_" + l_or_d, True):
                    print(Fore.YELLOW + "[SENDING COLLEPARADISO] Sto inviando un messaggio a: " + chat_id)
                    keyboard = InlineKeyboardMarkup(inline_keyboard=[
                        [dict(text='Offrici una birra!', url="https://www.paypal.me/azzeccagarbugli")]])
                    try:
                        bot.sendMessage(chat_id, msg_menu, parse_mode="Markdown", reply_markup=keyboard)
                    except telepot.exception.TelegramError as e:
                        if e.error_code == 400:
                            print(Fore.YELLOW + "[WARNING] Non sono riuscito ad inviare il messaggio a: " + chat_id)
    elif curr_time in update_first or curr_time in update_second:
        # Scheduled menu refresh windows.
        db.update_menues()
    time.sleep(upd_time)
5,331,776
def step_impl(context):
    """Log the car's maximum speed from the scenario context.

    :type context: behave.runner.Context
    """
    car = context.formule1
    logger.info(f"The car has a max speed of {car.max_speed}")
5,331,777
def pw2dense(pw, maxd):
    """Densify a sparse pairwise distance matrix.

    The sparse encoding stores a true distance of 0 as ``-1``, while implicit
    (unstored) entries mean "farther than ``maxd``".  The dense result maps
    the implicit zeros to ``maxd + 1`` and decodes the ``-1`` sentinels back
    to ``0``.
    """
    dense = np.asarray(pw.todense())
    # Implicit entries come out of todense() as 0: treat them as "beyond maxd".
    dense = np.where(dense == 0, maxd + 1, dense)
    # Decode the -1 sentinel back into a genuine zero distance.
    dense = np.where(dense == -1, 0, dense)
    return dense
5,331,778
def run_simulation(sim: td.Simulation) -> Awaitable[td.Simulation]:
    """Returns a simulation with simulation results

    Only submits simulation if results not found locally or remotely.

    First tries to load simulation results from disk.
    Then it tries to load them from the server storage.
    Finally, only submits simulation if not found

    .. code::

        import gtidy3d as gm

        component = gf.components.straight(length=3)
        sim = gm.get_simulation(component=component)
        sim = run_simulation(sim).result()
    """
    td.logging_level("error")
    sim_hash = get_sim_hash(sim)
    # FIX: the original computed the same results path twice (``sim_path``
    # and ``target``); a single variable is used everywhere now.
    sim_path = PATH.results / f"{sim_hash}.hdf5"
    logger.info(f"running simulation {sim_hash}")
    # First 32 chars of each remote task name hold the simulation hash.
    hash_to_id = {d["task_name"][:32]: d["task_id"] for d in web.get_last_projects()}

    # Try from local storage
    if sim_path.exists():
        logger.info(f"{sim_path} found in local storage")
        return _executor.submit(load_results, sim, sim_path)

    # Try from server storage
    if sim_hash in hash_to_id:
        return _executor.submit(load_results, sim, sim_path, hash_to_id[sim_hash])

    # Only submit if simulation not found locally or remotely
    task_id = _export_simulation(sim=sim, task_name=sim_hash)
    return _executor.submit(load_results, sim, sim_path, task_id)
5,331,779
def calculate_ucm_friction_factor_annular(
    ctx: "void*", ff_wG: "double*", ff_wL: "double*", ff_i: "double*"
) -> "int":
    """
    **c++ signature** : ``HOOK_CALCULATE_UCM_FRICTION_FACTOR_ANNULAR(void* ctx, double* ff_wG, double* ff_wL, double* ff_i)``

    Internal unit cell model `hook` to calculate the wall and interfacial friction
    factors for annular fluid flow pattern. The unit cell model represents a two phase
    flow with Gas and Liquid Phases. The output variables ``ff_wG``, ``ff_wL`` and
    ``ff_i`` are the Gas-Wall friction factor, Liquid-Wall friction factor and
    interfacial Gas-Liquid friction factor, respectively.

    This `hook` allows the developer to implement their own correlation for friction
    factor in an annular flow.

    :param ctx: ALFAsim's plugins context
    :param ff_wG: Gas-Wall Friction Factor
    :param ff_wL: Liquid-Wall Friction Factor
    :param ff_i: Interfacial Gas-Liquid Friction Factor

    :returns: Return OK if successful or anything different if failed

    Example of usage:

    The same example presented in
    :py:func:`HOOK_CALCULATE_UCM_FRICTION_FACTOR_STRATIFIED<alfasim_sdk._internal.hook_specs.calculate_ucm_friction_factor_stratified>`
    can be used, just change the `hook` name to `HOOK_CALCULATE_UCM_FRICTION_FACTOR_ANNULAR`.
    """
5,331,780
def get_cursor_position(fd=1):
    """Return the current console cursor position as an ``(x, y)`` tuple.

    :param fd: console file descriptor to query (defaults to stdout)
    """
    cursor = get_console_screen_buffer_info(fd=fd).dwCursorPosition
    return cursor.X, cursor.Y
5,331,781
def _held_karp(dists: np.ndarray) -> Tuple[float, np.ndarray]: """ Held-Karp algorithm solves the Traveling Salesman Problem. This algorithm uses dynamic programming with memoization. Parameters ---------- dists Distance matrix. Returns ------- The cost and the path. """ n = len(dists) # Maps each subset of the nodes to the cost to reach that subset, as well # as what node it passed before reaching this subset. # Node subsets are represented as set bits. C = {} # Set transition cost from initial state for k in range(1, n): C[1 << k, k] = (dists[0][k], 0) # Iterate subsets of increasing length and store intermediate results # in classic dynamic programming manner for subset_size in range(2, n): for subset in combinations(range(1, n), subset_size): # Set bits for all nodes in this subset bits = 0 for bit in subset: bits |= 1 << bit # Find the lowest cost to get to this subset for k in subset: prev = bits & ~(1 << k) res = [] for m in subset: if m == 0 or m == k: continue res.append((C[prev, m][0] + dists[m][k], m)) C[bits, k] = min(res) # We're interested in all bits but the least significant (the start state) bits = (2 ** n - 1) - 1 # Calculate optimal cost res = [] for k in range(1, n): res.append((C[bits, k][0] + dists[k][0], k)) opt, parent = min(res) # Backtrack to find full path path = [] for _ in range(n - 1): path.append(parent) new_bits = bits & ~(1 << parent) _, parent = C[bits, parent] bits = new_bits # Add implicit start state path.append(0) return opt, np.array(path)[::-1]
5,331,782
def process_log_data(spark, input_data, output_data): """Process the event log data storing users, time and songplay dimension tables. Arguments: spark -- SparkSession object input_data -- path to the raw event log data files output_data -- path to write out the resulting dimesion tables""" #TODO break out processing into one function / dimension table # specify schema for dataframe event_schema = build_event_schema() # read log data file events_df = from_disk(spark, event_schema, input_data, depth=3, extension='json') # filter by actions for song plays events_df = events_df.filter(events_df.page == 'NextSong') # apply consistent naming scheme retaining only these columns events_df = events_df.selectExpr([ 'firstName as first_name', 'lastName as last_name', 'userId as user_id', 'song as title', 'length as length', 'gender as gender', 'level as level', 'sessionId as session_id', 'location as location', 'page as page', 'ts as start_time']) # extract columns for users table users_table_df = events_df.select([ 'user_id', 'first_name', 'last_name', 'gender', 'level']) # filter out rows with empty user_ids users_table_df = users_table_df.filter(users_table_df.user_id != '') # write users table to parquet files inspect_df('users_table_df', users_table_df) to_disk(users_table_df, output_data + '/dim_user') # TODO create function to add these fields to the provided df # extract columns to create time table print(f'events_df.columns={events_df.columns}') # add time-related columns after removing unrelated columns time_table_df = add_time_columns(events_df.select(['start_time']), 'start_time') # TODO clean-up inspect_df('time_table_df 1', time_table_df) print(f'time_table_df.count()={time_table_df.count()}') # write time table to parquet files partitioned by year and month time_table_df.write.mode('overwrite').partitionBy('year', 'month').parquet(output_data + '/dim_time') # TODO clean-up inspect_df('time_table_df 2', time_table_df) 
print(f'time_table_df.count()={time_table_df.count()}') # read in song data to use for songplays table # s3://song-play-spark/dim_song/*.parquet song_df = events_df.select(['user_id', 'session_id', 'start_time', 'level', 'location']) inspect_df('song_df', song_df) # read in the song table song_table_df = from_disk(spark, None, output_data + '/dim_song/', extension='parquet') inspect_df('song_table_df', song_table_df) # read in the artist table artist_table_df = from_disk(spark, None, output_data + '/dim_artist/', extension='parquet') inspect_df('artist_table_df', artist_table_df) # inner join of dataframes on artist_id and selecting columns of interest song_artist_table_df = (song_table_df. join(artist_table_df, 'artist_id'). select(['song_id', 'title', 'duration', 'artist_id', 'artist_name'])) # TODO clean-up inspect_df('song_artist_table_df', song_artist_table_df) # extract columns from joined song and log datasets to create songplays table e_df = events_df.alias('e_df') sa_df = song_artist_table_df.alias('sa_df') # TODO clean-up inspect_df('sa_df', sa_df) inspect_df('e_df', e_df) cond = [e_df.title == sa_df.title, e_df.length == sa_df.duration] cols = [ 'first_name', 'last_name', 'user_id', 'gender', 'level', 'e_df.title', 'song_id', 'length', 'artist_id', 'artist_name', 'location', 'start_time'] songplay_table_df = (e_df.join(sa_df, cond)).select(cols) # TODO clean-up inspect_df('songplay_table_df', songplay_table_df) # # write songplays table to parquet files partitioned by year and month songplay_table_df = add_time_columns(songplay_table_df, 'start_time') # TODO clean-up print('songplay_table_df', songplay_table_df.columns) inspect_df('songplay_table_df', songplay_table_df) songplay_table_df.write.mode('overwrite').parquet(output_data + '/fact_songplay/', partitionBy=['year', 'month'])
5,331,783
def import_from_pickle(manager, folder, files, database):
    """Import folder with pickles into database.

    :param pathme_viewer.manager.Manager manager: PathMe manager
    :param str folder: folder to be imported
    :param iter[str] files: iterator with file names
    :param str database: resource name
    """
    description = 'Loading {} pickles to populate PathMe database'.format(database)
    for file_name in tqdm.tqdm(files, desc=description):
        bel_pathway = from_pickle(os.path.join(folder, file_name))
        pathway_id = os.path.splitext(file_name)[0]
        # KEGG file names carry an extra "_unflatten"/"_flatten" suffix;
        # keep only the identifier before the first underscore.
        if database == KEGG:
            pathway_id = pathway_id.partition('_')[0]
        pathway_dict = _prepare_pathway_model(pathway_id, database, bel_pathway)
        manager.get_or_create_pathway(pathway_dict)
    log.info('%s has been loaded', database)
5,331,784
def dl_files(go_directory):
    """Download the latest GO ontology and association files from geneontology.org.

    :param go_directory: directory to download the files into
        (note: the process working directory is changed to it)
    :return: tuple of (ontology .obo file name, gene2go association file name)
    """
    # Work from the requested directory; downloads land here.
    os.chdir(go_directory)
    # Fetch http://geneontology.org/ontology/go-basic.obo
    obo_fname = download_go_basic_obo()
    # Print the GO file version (second line of the .obo header).
    with open(obo_fname) as obo_file:
        for version_line in islice(obo_file, 1, 2):
            print(version_line)
    # Fetch the NCBI gene2go annotation file.
    fin_gene2go = download_ncbi_associations()
    return obo_fname, fin_gene2go
5,331,785
def NS(s, o):
    """
    Nash Sutcliffe efficiency coefficient

    Adapted for use in alarconpy by Albenis Pérez Alarcón
    contact: apalarcon1991@gmail.com

    Parameters
    --------------------------
    input:
    s: simulated
    o: observed

    output:
    ns: Nash Sutcliffe efficiency coefficient (1 means a perfect fit)
    """
    # Drop paired NaNs before computing the score.
    s, o = filter_nan(s, o)
    residual = sum((s - o) ** 2)
    variance = sum((o - np.mean(o)) ** 2)
    return 1 - residual / variance
5,331,786
def UTArgs(v):
    """Build a ``UTArgs`` syntax tag wrapping the value *v*."""
    ut_tag = SyntaxTag.TagUTArgs()
    ut_tag.AddV(v)
    return ut_tag
5,331,787
def rules_check(rulesengine_db, filename, output_path, query_start, query_end):
    """Check the active rulesets against a notebook run and collect predictions.

    For each active ruleset, rules whose filename fragment appears in
    ``filename`` are matched; for those, rules whose output fragment appears
    in the notebook's output mark a hit, and the ruleset's predictions for
    the hit rules are accumulated.

    :param rulesengine_db: path to the rules-engine sqlite database
    :param filename: notebook filename to match rule filename fragments against
    :param output_path: path handed to ``get_output_from_filename`` to read the run output
    :param query_start: range start passed to ``get_active_rulesets``
    :param query_end: range end passed to ``get_active_rulesets``
    :return: list of predictions from all matching rules
    """
    from src.praxxis.sqlite import sqlite_rulesengine

    rulesets = sqlite_rulesengine.get_active_rulesets(rulesengine_db, query_start, query_end)
    rulesmatch = []
    hit = set()
    predictions = []
    # NOTE(review): rulesmatch and hit are accumulated across *all* rulesets
    # (never reset per iteration), so matches from an earlier ruleset are
    # passed into later rulesets' lookups — confirm this is intentional.
    for ruleset in rulesets:
        filenames = sqlite_rulesengine.get_filenames_by_rule(ruleset[2])
        for fmatch in filenames:
            # Substring match of the rule's filename fragment in the notebook name.
            if fmatch[0] in filename:
                rulesmatch.append(fmatch[1])
        if rulesmatch != []:
            #get output
            from src.praxxis.notebook.notebook import get_output_from_filename
            output = get_output_from_filename(output_path)
            outputs = sqlite_rulesengine.get_outputs_for_rules(ruleset[2], rulesmatch)
            for omatch in outputs:
                # Substring match of the rule's output fragment in the run output.
                if omatch[0] in output:
                    hit.add(omatch[1])
            predictions.extend(sqlite_rulesengine.get_predictions(ruleset[2], hit))
    return predictions
5,331,788
def delete_shelf(shelf_name):
    """Delete the shelf identified by *shelf_name*.

    :param shelf_name: str name of the shelf to remove
    :raises NotImplementedError: always — deletion is not implemented yet
    """
    raise NotImplementedError()
5,331,789
def query(
    params,
    remote,
    query,
    level,
    query_res,
    since,
    before,
    local,
    query_res,  # noqa -- see note below; original parameter list preserved
    out_format,
    assume_yes,
    no_progress,
):
    """Perform a query against a network node"""
    # Resolve the textual level (case-insensitive) into a QueryLevel member.
    if level is not None:
        level = level.upper()
        for q_lvl in QueryLevel:
            if q_lvl.name == level:
                level = q_lvl
                break
        else:
            cli_error("Invalid level: %s" % level)
    # If no query-result file was given and stdin is piped, read it from stdin.
    if query_res is None and not sys.stdin.isatty():
        log.debug("Reading query_res from stdin")
        query_res = sys.stdin
    if query_res is not None:
        in_str = query_res.read()
        # An empty stream means "no prior result" rather than an error.
        if in_str:
            query_res = json_serializer.loads(in_str)
        else:
            query_res = None
    # Interactive terminal defaults to the tree view; piped output defaults
    # to JSON and disables the progress display.
    if sys.stdout.isatty():
        if out_format is None:
            out_format = "tree"
    else:
        no_progress = True
        if out_format is None:
            out_format = "json"
    if out_format not in ("tree", "json"):
        cli_error("Invalid out-format: %s" % out_format)
    local = params["config"].get_local_node(local)
    remote_node = params["config"].get_remote_node(remote)
    net_ent = LocalEntity(local)
    qdat = _build_query(query, since, before)
    # Guard against accidental unbounded queries unless --assume-yes was given.
    if len(qdat) == 0 and query_res is None and not assume_yes:
        if not click.confirm(
            "This query hasn't been limited in any "
            "way and may generate a huge result, "
            "continue?"
        ):
            return
    with ExitStack() as estack:
        if not no_progress:
            # Rich progress bar shown only for interactive runs.
            prog = RichProgressHook(
                estack.enter_context(
                    Progress(console=params["rich_con"], transient=True)
                )
            )
            report = MultiListReport(description="query", prog_hook=prog)
        else:
            report = MultiListReport(description="query")
        qr = asyncio.run(
            net_ent.query(remote_node, level, qdat, query_res, report=report)
        )
    if out_format == "tree":
        out = qr.to_tree()
    elif out_format == "json":
        out = json_serializer.dumps(qr, indent=4)
    click.echo(out)
    report.log_issues()
5,331,790
def shortstr(s, max_len=144, replace=None):
    """Obtain a shortened, single-line string representation of *s*.

    :param s: object to stringify
    :param max_len: maximum length of the result; values <= 0 disable
        truncation.  Truncated results end with ' ...'.
    :param replace: mapping of substring -> replacement applied before
        truncation; defaults to replacing newlines with ';'
    :return: the (possibly truncated) string
    """
    # FIX: the default was a mutable dict literal shared across calls;
    # use None and build a fresh dict instead.
    if replace is None:
        replace = {'\n': ';'}
    s = str(s)
    for old, new in replace.items():
        s = s.replace(old, new)
    if 0 < max_len < len(s):
        # Reserve 4 characters for the ' ...' suffix.
        s = s[:max_len - 4] + ' ...'
    return s
5,331,791
def update_gms_stats_collection(
    self,
    application: bool = None,
    dns: bool = None,
    drc: bool = None,
    drops: bool = None,
    dscp: bool = None,
    flow: bool = None,
    interface: bool = None,
    jitter: bool = None,
    port: bool = None,
    shaper: bool = None,
    top_talkers: bool = None,
    tunnel: bool = None,
) -> bool:
    """Enable/disable stats collection by orchestrator.  All parameters are
    optional; only the toggles explicitly provided (non-``None``) are sent.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - gmsStatsCollection
          - POST
          - /gms/statsCollection

    :param application: toggle for ``Application`` stats (no description in
        Swagger), defaults to None
    :type application: bool, optional
    :param dns: toggle for ``Dns`` stats, defaults to None
    :type dns: bool, optional
    :param drc: toggle for ``Drc`` stats, defaults to None
    :type drc: bool, optional
    :param drops: toggle for ``Drops`` stats, defaults to None
    :type drops: bool, optional
    :param dscp: toggle for ``Dscp`` stats, defaults to None
    :type dscp: bool, optional
    :param flow: toggle for ``Flow`` stats, defaults to None
    :type flow: bool, optional
    :param interface: toggle for ``Interface`` stats, defaults to None
    :type interface: bool, optional
    :param jitter: toggle for ``Jitter`` stats, defaults to None
    :type jitter: bool, optional
    :param port: toggle for ``Port`` stats, defaults to None
    :type port: bool, optional
    :param shaper: toggle for ``Shaper`` stats, defaults to None
    :type shaper: bool, optional
    :param top_talkers: toggle for ``TopTalkers`` stats, defaults to None
    :type top_talkers: bool, optional
    :param tunnel: toggle for ``Tunnel`` stats, defaults to None
    :type tunnel: bool, optional
    :return: Returns True/False based on successful call.
    :rtype: bool
    """
    # Map each keyword argument onto the API field name it controls.
    flag_map = {
        "Application": application,
        "Dns": dns,
        "Drc": drc,
        "Drops": drops,
        "Dscp": dscp,
        "Flow": flow,
        "Interface": interface,
        "Jitter": jitter,
        "Port": port,
        "Shaper": shaper,
        "TopTalkers": top_talkers,
        "Tunnel": tunnel,
    }
    # Only send the flags the caller actually set.
    data = {field: value for field, value in flag_map.items() if value is not None}
    return self._post(
        "/gms/statsCollection",
        data=data,
        return_type="bool",
    )
5,331,792
def test_process_fields(cbcsdk_mock):
    """Testing AsyncProcessQuery.set_fields()."""
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    # Chain all of the builder methods, ending with set_fields().
    process_query = (
        api.select(Process)
        .where("event_type:modload")
        .add_criteria("device_id", [1234])
        .add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
        .set_fields(["parent_hash", "device_policy"])
    )
    actual_params = process_query._get_query_parameters()
    expected_params = {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
        "fields": ["parent_hash", "device_policy"],
    }
    assert actual_params == expected_params
5,331,793
def _get_nearby_factories(latitude, longitude, radius): """Return nearby factories based on position and search range.""" # ref: https://stackoverflow.com/questions/574691/mysql-great-circle-distance-haversine-formula distance = 6371 * ACos( Cos(Radians(latitude)) * Cos(Radians("lat")) * Cos(Radians("lng") - Radians(longitude)) + Sin(Radians(latitude)) * Sin(Radians("lat")) ) radius_km = radius ids = Factory.objects.annotate(distance=distance).only("id").filter(distance__lt=radius_km).order_by("id") if len(ids) > settings.MAX_FACTORY_PER_GET: ids = _sample(ids, settings.MAX_FACTORY_PER_GET) return ( Factory.objects.filter(id__in=[obj.id for obj in ids]) .prefetch_related(Prefetch('report_records', queryset=ReportRecord.objects.only("created_at").all())) .prefetch_related(Prefetch('images', queryset=Image.objects.only("id").all())) .prefetch_related(Prefetch('documents', queryset=Document.objects.only('created_at', 'display_status').all())) .all() )
5,331,794
def add_register(request):
    """Handle the registration form submission and persist the new user.

    :param request: HTTP request carrying the POSTed registration form
    :return: redirect to the login page on success; otherwise the
        registration page re-rendered with the form's validation errors
    """
    form = forms.RegisterForm(request.POST)
    if not form.is_valid():
        # Hand the form (with its error messages) back to the template.
        return render(request, 'login/register.html', {"form": form})
    data = form.cleaned_data
    # Drop the confirmation field and store only the hashed password.
    data.pop("re_password")
    data['password'] = hash_pwd.has_password(data.get('password'))
    # New accounts start out active.
    data['is_active'] = 1
    models.UserInfo.objects.create(**data)
    return redirect('mysite:login')
5,331,795
def confidence_interval(data, alpha=0.1):
    """
    Calculate the confidence interval for each column in a pandas dataframe.

    @param data: A pandas dataframe with one or several columns.
    @param alpha: The significance level; by default 0.1, i.e. the 90%
        confidence interval is calculated.
    @return: A series where each entry contains the formatted
        "mean +/- width" confidence interval for the corresponding column.
    """
    # FIX: the parameter was previously shadowed by a hard-coded
    # ``alpha = 0.1`` on the first line, so the caller's value was ignored.
    t = lambda column: scipy_stats.t.isf(alpha / 2.0, len(column) - 1)
    width = lambda column: t(column) * numpy.std(column.values, ddof=1) / sqrt(len(column))
    formatted_interval = lambda column: "%.2f +/- %.4f" % (column.mean(), width(column))
    return pandas.Series([formatted_interval(data[c]) for c in data.columns], index=data.columns)
5,331,796
def test_datasets_str():
    """Test that datasets are printed as expected."""
    url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
           'CONUS_20km/noaaport/catalog.xml')
    catalog = TDSCatalog(url)
    expected = ("['Full Collection (Reference / Forecast Time) Dataset', "
                "'Best NAM CONUS 20km Time Series', "
                "'Latest Collection for NAM CONUS 20km']")
    assert str(catalog.datasets) == expected
5,331,797
def RunInTransactionOptions(options, function, *args, **kwargs):
  """Runs a function inside a datastore transaction.

  Runs the user-provided function inside a full-featured, ACID datastore
  transaction. Every Put, Get, and Delete call in the function is made within
  the transaction. All entities involved in these calls must belong to the
  same entity group. Queries are supported as long as they specify an
  ancestor belonging to the same entity group.

  The trailing arguments are passed to the function as positional arguments.
  If the function returns a value, that value will be returned by
  RunInTransaction. Otherwise, it will return None.

  The function may raise any exception to roll back the transaction instead
  of committing it. If this happens, the transaction will be rolled back and
  the exception will be re-raised up to RunInTransaction's caller.

  If you want to roll back intentionally, but don't have an appropriate
  exception to raise, you can raise an instance of datastore_errors.Rollback.
  It will cause a rollback, but will *not* be re-raised up to the caller.

  The function may be run more than once, so it should be idempotent. It
  should avoid side effects, and it shouldn't have *any* side effects that
  aren't safe to occur multiple times. This includes modifying the arguments,
  since they persist across invocations of the function. However, this
  doesn't include Put, Get, and Delete calls, of course.

  Example usage:

  > def decrement(key, amount=1):
  >   counter = datastore.Get(key)
  >   counter['count'] -= amount
  >   if counter['count'] < 0:    # don't let the counter go negative
  >     raise datastore_errors.Rollback()
  >   datastore.Put(counter)
  >
  > counter = datastore.Query('Counter', {'name': 'foo'})
  > datastore.RunInTransaction(decrement, counter.key(), amount=5)

  Transactions satisfy the traditional ACID properties. They are:

  - Atomic. All of a transaction's operations are executed or none of them are.

  - Consistent. The datastore's state is consistent before and after a
  transaction, whether it committed or rolled back. Invariants such as
  "every entity has a primary key" are preserved.

  - Isolated. Transactions operate on a snapshot of the datastore. Other
  datastore operations do not see intermediate effects of the transaction;
  they only see its effects after it has committed.

  - Durable. On commit, all writes are persisted to the datastore.

  Nested transactions are not supported.

  Args:
    options: TransactionOptions specifying options (number of retries, etc)
      for this transaction
    function: a function to be run inside the transaction on all remaining
      arguments
    *args: positional arguments for function.
    **kwargs: keyword arguments for function.

  Returns:
    the function's return value, if any

  Raises:
    TransactionFailedError, if the transaction could not be committed.
  """
  # Delegate to the internal runner in read-write mode.
  return _RunInTransactionInternal(options,
                                   datastore_rpc.TransactionMode.READ_WRITE,
                                   function, *args, **kwargs)
5,331,798
def notify(message, key, target_object=None, url=None, filter_exclude=None):
    """
    Notify subscribing users of a new event. Key can be any kind of string,
    just make sure to reuse it where applicable!

    Object_id is some identifier of an object, for instance if a user
    subscribes to a specific comment thread, you could write:

    notify("there was a response to your comment", "comment_response",
           target_object=PostersObject,
           url=reverse('comments:view', args=(PostersObject.id,)))

    The below example notifies everyone subscribing to the "new_comments" key
    with the message "New comment posted".

    notify("New comment posted", "new_comments")

    filter_exclude: a dictionary used to exclude specific subscriptions from
    the queryset (passed through to Notification.create_notifications);
    defaults to no exclusions.

    Returns the number of notifications created (0 when notifications are
    disabled).
    """
    # FIX: the default was a mutable dict literal shared across calls;
    # use None and substitute a fresh empty dict instead.
    if filter_exclude is None:
        filter_exclude = {}

    if _disable_notifications:
        return 0

    if target_object:
        if not isinstance(target_object, Model):
            raise TypeError(_("You supplied a target_object that's not an instance of a django Model."))
        object_id = target_object.id
    else:
        object_id = None

    objects = models.Notification.create_notifications(
        key,
        object_id=object_id,
        message=message,
        url=url,
        filter_exclude=filter_exclude,
    )
    return len(objects)
5,331,799